summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--.mailmap7
-rw-r--r--.testr.conf4
-rw-r--r--CONTRIBUTING.rst17
-rw-r--r--HACKING.rst3
-rw-r--r--README.rst6
-rwxr-xr-xbin/nova-all5
-rwxr-xr-xbin/nova-api5
-rwxr-xr-xbin/nova-api-ec21
-rwxr-xr-xbin/nova-api-metadata1
-rwxr-xr-xbin/nova-api-os-compute1
-rwxr-xr-xbin/nova-baremetal-deploy-helper318
-rwxr-xr-xbin/nova-baremetal-manage234
-rwxr-xr-xbin/nova-cells53
-rwxr-xr-xbin/nova-cert5
-rwxr-xr-xbin/nova-clear-rabbit-queues23
-rwxr-xr-xbin/nova-compute8
-rwxr-xr-xbin/nova-conductor52
-rwxr-xr-xbin/nova-console5
-rwxr-xr-xbin/nova-consoleauth6
-rwxr-xr-xbin/nova-dhcpbridge51
-rwxr-xr-xbin/nova-manage485
-rwxr-xr-xbin/nova-network5
-rwxr-xr-xbin/nova-novncproxy11
-rwxr-xr-xbin/nova-objectstore1
-rwxr-xr-xbin/nova-rootwrap83
-rwxr-xr-xbin/nova-rpc-zmq-receiver4
-rwxr-xr-xbin/nova-scheduler5
-rwxr-xr-xbin/nova-xvpvncproxy1
-rwxr-xr-xcontrib/openstack-config65
-rw-r--r--doc/api_samples/OS-DCF/image-get-resp.json34
-rw-r--r--doc/api_samples/OS-DCF/image-get-resp.xml12
-rw-r--r--doc/api_samples/OS-DCF/image-list-resp.json214
-rw-r--r--doc/api_samples/OS-DCF/image-list-resp.xml71
-rw-r--r--doc/api_samples/OS-DCF/list-servers-detail-get.json57
-rw-r--r--doc/api_samples/OS-DCF/list-servers-detail-get.xml21
-rw-r--r--doc/api_samples/OS-DCF/server-action-rebuild-req.json6
-rw-r--r--doc/api_samples/OS-DCF/server-action-rebuild-req.xml6
-rw-r--r--doc/api_samples/OS-DCF/server-action-rebuild-resp.json56
-rw-r--r--doc/api_samples/OS-DCF/server-action-rebuild-resp.xml19
-rw-r--r--doc/api_samples/OS-DCF/server-get-resp.json55
-rw-r--r--doc/api_samples/OS-DCF/server-get-resp.xml19
-rw-r--r--doc/api_samples/OS-DCF/server-post-req.json16
-rw-r--r--doc/api_samples/OS-DCF/server-post-req.xml19
-rw-r--r--doc/api_samples/OS-DCF/server-post-resp.json17
-rw-r--r--doc/api_samples/OS-DCF/server-post-resp.xml6
-rw-r--r--doc/api_samples/OS-DCF/server-resize-post-req.json6
-rw-r--r--doc/api_samples/OS-DCF/server-resize-post-req.xml6
-rw-r--r--doc/api_samples/OS-DCF/server-update-put-req.json5
-rw-r--r--doc/api_samples/OS-DCF/server-update-put-req.xml5
-rw-r--r--doc/api_samples/OS-DCF/server-update-put-resp.json55
-rw-r--r--doc/api_samples/OS-DCF/server-update-put-resp.xml19
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json16
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml6
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json16
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml6
-rw-r--r--doc/api_samples/OS-EXT-STS/server-post-req.json16
-rw-r--r--doc/api_samples/OS-EXT-STS/server-post-req.xml19
-rw-r--r--doc/api_samples/OS-EXT-STS/server-post-resp.json16
-rw-r--r--doc/api_samples/OS-EXT-STS/server-post-resp.xml6
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-detail-resp.json59
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-detail-resp.xml21
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-list-resp.json18
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-list-resp.xml7
-rw-r--r--doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json94
-rw-r--r--doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml23
-rw-r--r--doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json20
-rw-r--r--doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml5
-rw-r--r--doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json12
-rw-r--r--doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml4
-rw-r--r--doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml2
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json168
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml111
-rw-r--r--doc/api_samples/all_extensions/flavor-get-resp.json2
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.json16
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.xml6
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.json17
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.xml6
-rw-r--r--doc/api_samples/limit-get-resp.json33
-rw-r--r--doc/api_samples/limit-get-resp.xml23
-rw-r--r--doc/api_samples/os-agents/agent-post-req.json10
-rw-r--r--doc/api_samples/os-agents/agent-post-req.xml9
-rw-r--r--doc/api_samples/os-agents/agent-post-resp.json11
-rw-r--r--doc/api_samples/os-agents/agent-post-resp.xml10
-rw-r--r--doc/api_samples/os-agents/agent-update-put-req.json7
-rw-r--r--doc/api_samples/os-agents/agent-update-put-req.xml6
-rw-r--r--doc/api_samples/os-agents/agent-update-put-resp.json8
-rw-r--r--doc/api_samples/os-agents/agent-update-put-resp.xml7
-rw-r--r--doc/api_samples/os-agents/agents-get-resp.json13
-rw-r--r--doc/api_samples/os-agents/agents-get-resp.xml4
-rw-r--r--doc/api_samples/os-aggregates/aggregate-update-post-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregate-update-post-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-get-resp.json8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-get-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-list-get-resp.json6
-rw-r--r--doc/api_samples/os-aggregates/aggregates-list-get-resp.xml8
-rw-r--r--doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json3
-rw-r--r--doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml3
-rw-r--r--doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json6
-rw-r--r--doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml6
-rw-r--r--doc/api_samples/os-cells/cells-get-resp.json9
-rw-r--r--doc/api_samples/os-cells/cells-get-resp.xml2
-rw-r--r--doc/api_samples/os-cells/cells-list-empty-resp.json3
-rw-r--r--doc/api_samples/os-cells/cells-list-empty-resp.xml2
-rw-r--r--doc/api_samples/os-cells/cells-list-resp.json39
-rw-r--r--doc/api_samples/os-cells/cells-list-resp.xml8
-rw-r--r--doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json6
-rw-r--r--doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml5
-rw-r--r--doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json4
-rw-r--r--doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json16
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-req.json5
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-req.xml2
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-resp.json6
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-resp.xml5
-rw-r--r--doc/api_samples/os-consoles/server-post-req.json16
-rw-r--r--doc/api_samples/os-consoles/server-post-req.xml19
-rw-r--r--doc/api_samples/os-consoles/server-post-resp.json16
-rw-r--r--doc/api_samples/os-consoles/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-coverage/coverage-report-post-req.json5
-rw-r--r--doc/api_samples/os-coverage/coverage-report-post-req.xml4
-rw-r--r--doc/api_samples/os-coverage/coverage-report-post-resp.json3
-rw-r--r--doc/api_samples/os-coverage/coverage-report-post-resp.xml2
-rw-r--r--doc/api_samples/os-coverage/coverage-start-combine-post-req.json5
-rw-r--r--doc/api_samples/os-coverage/coverage-start-combine-post-req.xml4
-rw-r--r--doc/api_samples/os-coverage/coverage-start-post-req.json4
-rw-r--r--doc/api_samples/os-coverage/coverage-start-post-req.xml2
-rw-r--r--doc/api_samples/os-coverage/coverage-stop-post-req.json4
-rw-r--r--doc/api_samples/os-coverage/coverage-stop-post-req.xml2
-rw-r--r--doc/api_samples/os-coverage/coverage-stop-post-resp.json3
-rw-r--r--doc/api_samples/os-coverage/coverage-stop-post-resp.xml2
-rw-r--r--doc/api_samples/os-coverage/coverage-xml-report-post-req.json6
-rw-r--r--doc/api_samples/os-coverage/coverage-xml-report-post-req.xml5
-rw-r--r--doc/api_samples/os-coverage/coverage-xml-report-post-resp.json3
-rw-r--r--doc/api_samples/os-coverage/coverage-xml-report-post-resp.xml2
-rw-r--r--doc/api_samples/os-deferred-delete/force-delete-post-req.json3
-rw-r--r--doc/api_samples/os-deferred-delete/force-delete-post-req.xml2
-rw-r--r--doc/api_samples/os-deferred-delete/restore-post-req.json3
-rw-r--r--doc/api_samples/os-deferred-delete/restore-post-req.xml2
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-req.json16
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-req.xml19
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-resp.json16
-rw-r--r--doc/api_samples/os-deferred-delete/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-fixed-ips/fixedip-post-req.json3
-rw-r--r--doc/api_samples/os-fixed-ips/fixedip-post-req.xml2
-rw-r--r--doc/api_samples/os-fixed-ips/fixedips-get-resp.json8
-rw-r--r--doc/api_samples/os-fixed-ips/fixedips-get-resp.xml7
-rw-r--r--doc/api_samples/os-flavor-manage/flavor-create-post-req.json9
-rw-r--r--doc/api_samples/os-flavor-manage/flavor-create-post-req.xml8
-rw-r--r--doc/api_samples/os-flavor-manage/flavor-create-post-resp.json19
-rw-r--r--doc/api_samples/os-flavor-manage/flavor-create-post-resp.xml5
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json20
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml5
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json94
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml23
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json10
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml9
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json20
-rw-r--r--doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml5
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json20
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml5
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json94
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml23
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-post-req.json10
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml9
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json20
-rw-r--r--doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml5
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json8
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml6
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json7
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml6
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json3
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml2
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json3
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml2
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json11
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml10
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json25
-rw-r--r--doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml24
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-get-resp.json54
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-get-resp.xml19
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-post-req.json0
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-post-req.xml0
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-post-resp.json16
-rw-r--r--doc/api_samples/os-hide-server-addresses/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-hide-server-addresses/servers-details-resp.json56
-rw-r--r--doc/api_samples/os-hide-server-addresses/servers-details-resp.xml21
-rw-r--r--doc/api_samples/os-hide-server-addresses/servers-list-resp.json18
-rw-r--r--doc/api_samples/os-hide-server-addresses/servers-list-resp.xml7
-rw-r--r--doc/api_samples/os-hosts/host-get-reboot.json4
-rw-r--r--doc/api_samples/os-hosts/host-get-reboot.xml2
-rw-r--r--doc/api_samples/os-hosts/host-get-resp.json31
-rw-r--r--doc/api_samples/os-hosts/host-get-resp.xml24
-rw-r--r--doc/api_samples/os-hosts/host-get-shutdown.json4
-rw-r--r--doc/api_samples/os-hosts/host-get-shutdown.xml2
-rw-r--r--doc/api_samples/os-hosts/host-get-startup.json4
-rw-r--r--doc/api_samples/os-hosts/host-get-startup.xml2
-rw-r--r--doc/api_samples/os-hosts/host-put-maintenance-req.json4
-rw-r--r--doc/api_samples/os-hosts/host-put-maintenance-req.xml5
-rw-r--r--doc/api_samples/os-hosts/host-put-maintenance-resp.json5
-rw-r--r--doc/api_samples/os-hosts/host-put-maintenance-resp.xml2
-rw-r--r--doc/api_samples/os-hosts/hosts-list-resp.json34
-rw-r--r--doc/api_samples/os-hosts/hosts-list-resp.xml9
-rw-r--r--doc/api_samples/os-networks-associate/network-associate-host-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-associate-host-req.xml2
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-host-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-host-req.xml1
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-project-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-project-req.xml1
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-req.json3
-rw-r--r--doc/api_samples/os-networks-associate/network-disassociate-req.xml1
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json16
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml14
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json15
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml14
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json15
-rw-r--r--doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml14
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json16
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml14
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.json16
-rw-r--r--doc/api_samples/os-quota-sets/quotas-show-get-resp.xml14
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-req.json5
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-req.xml4
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.json15
-rw-r--r--doc/api_samples/os-quota-sets/quotas-update-post-resp.xml14
-rw-r--r--doc/api_samples/os-server-password/get-password-resp.json3
-rw-r--r--doc/api_samples/os-server-password/get-password-resp.xml2
-rw-r--r--doc/api_samples/os-server-password/server-post-req.json16
-rw-r--r--doc/api_samples/os-server-password/server-post-req.xml19
-rw-r--r--doc/api_samples/os-server-password/server-post-resp.json16
-rw-r--r--doc/api_samples/os-server-password/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-req.json16
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-req.xml19
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-resp.json16
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-server-start-stop/server_start_stop.xml2
-rw-r--r--doc/api_samples/os-tenant-networks/networks-list-res.json14
-rw-r--r--doc/api_samples/os-tenant-networks/networks-post-res.json7
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.json29
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.xml23
-rw-r--r--doc/source/conf.py4
-rw-r--r--doc/source/devref/development.environment.rst2
-rw-r--r--doc/source/devref/filter_scheduler.rst38
-rw-r--r--doc/source/devref/hooks.rst57
-rw-r--r--doc/source/devref/index.rst1
-rw-r--r--doc/source/man/nova-baremetal-deploy-helper.rst52
-rw-r--r--doc/source/man/nova-baremetal-manage.rst67
-rw-r--r--doc/source/man/nova-cert.rst2
-rw-r--r--doc/source/man/nova-conductor.rst45
-rw-r--r--doc/source/man/nova-novncproxy.rst10
-rw-r--r--etc/nova/api-paste.ini2
-rw-r--r--etc/nova/nova.conf.sample3123
-rw-r--r--etc/nova/policy.json19
-rw-r--r--etc/nova/release.sample4
-rw-r--r--etc/nova/rootwrap.conf20
-rw-r--r--etc/nova/rootwrap.d/api-metadata.filters12
-rw-r--r--etc/nova/rootwrap.d/baremetal-compute-ipmi.filters9
-rw-r--r--etc/nova/rootwrap.d/baremetal-compute-pxe.filters11
-rw-r--r--etc/nova/rootwrap.d/baremetal-deploy-helper.filters10
-rw-r--r--etc/nova/rootwrap.d/compute.filters82
-rw-r--r--etc/nova/rootwrap.d/network.filters24
-rw-r--r--nova/api/auth.py27
-rw-r--r--nova/api/ec2/__init__.py18
-rw-r--r--nova/api/ec2/apirequest.py3
-rw-r--r--nova/api/ec2/cloud.py222
-rw-r--r--nova/api/ec2/ec2utils.py14
-rw-r--r--nova/api/ec2/faults.py5
-rw-r--r--nova/api/manager.py8
-rw-r--r--nova/api/metadata/base.py50
-rw-r--r--nova/api/metadata/handler.py138
-rw-r--r--nova/api/metadata/password.py68
-rw-r--r--nova/api/openstack/__init__.py6
-rw-r--r--nova/api/openstack/auth.py5
-rw-r--r--nova/api/openstack/common.py45
-rw-r--r--nova/api/openstack/compute/__init__.py7
-rw-r--r--nova/api/openstack/compute/consoles.py14
-rw-r--r--nova/api/openstack/compute/contrib/__init__.py13
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py41
-rw-r--r--nova/api/openstack/compute/contrib/agents.py171
-rw-r--r--nova/api/openstack/compute/contrib/aggregates.py11
-rw-r--r--nova/api/openstack/compute/contrib/availability_zone.py2
-rw-r--r--nova/api/openstack/compute/contrib/cells.py303
-rw-r--r--nova/api/openstack/compute/contrib/certificates.py3
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe.py13
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe_update.py76
-rw-r--r--nova/api/openstack/compute/contrib/config_drive.py5
-rw-r--r--nova/api/openstack/compute/contrib/coverage_ext.py267
-rw-r--r--nova/api/openstack/compute/contrib/createserverext.py2
-rw-r--r--nova/api/openstack/compute/contrib/deferred_delete.py5
-rw-r--r--nova/api/openstack/compute/contrib/disk_config.py2
-rw-r--r--nova/api/openstack/compute/contrib/extended_server_attributes.py1
-rw-r--r--nova/api/openstack/compute/contrib/extended_status.py3
-rw-r--r--nova/api/openstack/compute/contrib/fixed_ips.py98
-rw-r--r--nova/api/openstack/compute/contrib/flavor_access.py2
-rw-r--r--nova/api/openstack/compute/contrib/flavor_disabled.py2
-rw-r--r--nova/api/openstack/compute/contrib/flavor_rxtx.py2
-rw-r--r--nova/api/openstack/compute/contrib/flavor_swap.py2
-rw-r--r--nova/api/openstack/compute/contrib/flavorextradata.py2
-rw-r--r--nova/api/openstack/compute/contrib/flavorextraspecs.py12
-rw-r--r--nova/api/openstack/compute/contrib/floating_ip_dns.py14
-rw-r--r--nova/api/openstack/compute/contrib/floating_ip_pools.py2
-rw-r--r--nova/api/openstack/compute/contrib/floating_ips.py2
-rw-r--r--nova/api/openstack/compute/contrib/floating_ips_bulk.py173
-rw-r--r--nova/api/openstack/compute/contrib/fping.py160
-rw-r--r--nova/api/openstack/compute/contrib/hide_server_addresses.py89
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py361
-rw-r--r--nova/api/openstack/compute/contrib/hypervisors.py2
-rw-r--r--nova/api/openstack/compute/contrib/instance_usage_audit_log.py8
-rw-r--r--nova/api/openstack/compute/contrib/keypairs.py6
-rw-r--r--nova/api/openstack/compute/contrib/multinic.py2
-rw-r--r--nova/api/openstack/compute/contrib/multiple_create.py2
-rw-r--r--nova/api/openstack/compute/contrib/networks_associate.py67
-rw-r--r--nova/api/openstack/compute/contrib/os_networks.py (renamed from nova/api/openstack/compute/contrib/networks.py)50
-rw-r--r--nova/api/openstack/compute/contrib/os_tenant_networks.py214
-rw-r--r--nova/api/openstack/compute/contrib/quota_classes.py4
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py4
-rw-r--r--nova/api/openstack/compute/contrib/rescue.py9
-rw-r--r--nova/api/openstack/compute/contrib/scheduler_hints.py2
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py37
-rw-r--r--nova/api/openstack/compute/contrib/server_diagnostics.py2
-rw-r--r--nova/api/openstack/compute/contrib/server_password.py87
-rw-r--r--nova/api/openstack/compute/contrib/server_start_stop.py4
-rw-r--r--nova/api/openstack/compute/contrib/services.py144
-rw-r--r--nova/api/openstack/compute/contrib/simple_tenant_usage.py7
-rw-r--r--nova/api/openstack/compute/contrib/used_limits.py17
-rw-r--r--nova/api/openstack/compute/contrib/user_data.py2
-rw-r--r--nova/api/openstack/compute/contrib/virtual_interfaces.py2
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py34
-rw-r--r--nova/api/openstack/compute/extensions.py13
-rw-r--r--nova/api/openstack/compute/image_metadata.py5
-rw-r--r--nova/api/openstack/compute/images.py1
-rw-r--r--nova/api/openstack/compute/ips.py1
-rw-r--r--nova/api/openstack/compute/limits.py1
-rw-r--r--nova/api/openstack/compute/server_metadata.py8
-rw-r--r--nova/api/openstack/compute/servers.py97
-rw-r--r--nova/api/openstack/compute/versions.py4
-rw-r--r--nova/api/openstack/compute/views/addresses.py1
-rw-r--r--nova/api/openstack/compute/views/images.py11
-rw-r--r--nova/api/openstack/compute/views/limits.py2
-rw-r--r--nova/api/openstack/compute/views/servers.py25
-rw-r--r--nova/api/openstack/compute/views/versions.py8
-rw-r--r--nova/api/openstack/extensions.py1
-rw-r--r--nova/api/openstack/wsgi.py28
-rw-r--r--nova/api/sizelimit.py6
-rw-r--r--nova/api/validator.py2
-rw-r--r--nova/availability_zones.py62
-rw-r--r--nova/block_device.py15
-rw-r--r--nova/cells/__init__.py19
-rw-r--r--nova/cells/driver.py41
-rw-r--r--nova/cells/manager.py231
-rw-r--r--nova/cells/messaging.py1080
-rw-r--r--nova/cells/opts.py44
-rw-r--r--nova/cells/rpc_driver.py165
-rw-r--r--nova/cells/rpcapi.py157
-rw-r--r--nova/cells/scheduler.py136
-rw-r--r--nova/cells/state.py354
-rw-r--r--nova/cells/utils.py48
-rw-r--r--nova/cert/manager.py12
-rw-r--r--nova/cert/rpcapi.py17
-rw-r--r--nova/cloudpipe/pipelib.py45
-rw-r--r--nova/common/memorycache.py20
-rw-r--r--nova/compute/__init__.py27
-rw-r--r--nova/compute/api.py587
-rw-r--r--nova/compute/cells_api.py532
-rw-r--r--nova/compute/claims.py38
-rw-r--r--nova/compute/instance_types.py42
-rw-r--r--nova/compute/manager.py1536
-rw-r--r--nova/compute/resource_tracker.py343
-rw-r--r--nova/compute/rpcapi.py78
-rw-r--r--nova/compute/stats.py8
-rw-r--r--nova/compute/task_states.py2
-rw-r--r--nova/compute/utils.py50
-rw-r--r--nova/conductor/__init__.py26
-rw-r--r--nova/conductor/api.py520
-rw-r--r--nova/conductor/manager.py293
-rw-r--r--nova/conductor/rpcapi.py299
-rw-r--r--nova/config.py11
-rw-r--r--nova/console/api.py11
-rw-r--r--nova/console/manager.py11
-rw-r--r--nova/console/rpcapi.py17
-rw-r--r--nova/console/vmrc.py4
-rw-r--r--nova/console/vmrc_manager.py8
-rw-r--r--nova/console/xvp.py20
-rw-r--r--nova/consoleauth/__init__.py4
-rw-r--r--nova/consoleauth/manager.py22
-rw-r--r--nova/consoleauth/rpcapi.py10
-rw-r--r--nova/context.py21
-rw-r--r--nova/crypto.py31
-rw-r--r--nova/db/api.py655
-rw-r--r--nova/db/base.py8
-rw-r--r--nova/db/migration.py2
-rw-r--r--nova/db/sqlalchemy/api.py2106
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py63
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py42
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py58
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql97
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql97
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py117
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py237
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql226
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql226
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py205
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py73
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py54
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py54
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py94
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql133
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql132
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py145
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py106
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql64
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql64
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py43
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py43
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py70
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py62
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py63
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql53
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql52
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py189
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py69
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py108
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql85
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql85
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py108
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py94
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py98
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py59
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py59
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py76
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py68
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py96
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py57
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py107
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py48
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/082_essex.py)748
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py55
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py)31
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py)16
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py)21
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql239
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql239
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py)14
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/141_update_migrations_instance_uuid.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py)16
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/142_add_migrations_instance_status_index.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py)22
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py65
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py185
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py69
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py)39
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py83
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py101
-rw-r--r--nova/db/sqlalchemy/migration.py3
-rw-r--r--nova/db/sqlalchemy/models.py242
-rw-r--r--nova/db/sqlalchemy/session.py290
-rw-r--r--nova/exception.py243
-rw-r--r--nova/filters.py53
-rw-r--r--nova/flags.py380
-rw-r--r--nova/hooks.py96
-rw-r--r--nova/image/glance.py50
-rw-r--r--nova/image/s3.py14
-rw-r--r--nova/ipv6/account_identifier.py2
-rw-r--r--nova/ipv6/api.py4
-rw-r--r--nova/ipv6/rfc2462.py2
-rw-r--r--nova/loadables.py116
-rw-r--r--nova/locale/nova.pot4890
-rw-r--r--nova/manager.py105
-rw-r--r--nova/netconf.py62
-rw-r--r--nova/network/__init__.py15
-rw-r--r--nova/network/api.py85
-rw-r--r--nova/network/dns_driver.py18
-rw-r--r--nova/network/driver.py44
-rw-r--r--nova/network/l3.py15
-rw-r--r--nova/network/ldapdns.py20
-rw-r--r--nova/network/linux_net.py310
-rw-r--r--nova/network/manager.py418
-rw-r--r--nova/network/minidns.py84
-rw-r--r--nova/network/model.py43
-rw-r--r--nova/network/noop_dns_driver.py49
-rw-r--r--nova/network/nova_ipam_lib.py2
-rw-r--r--nova/network/quantum/nova_ipam_lib.py274
-rw-r--r--nova/network/quantumv2/__init__.py17
-rw-r--r--nova/network/quantumv2/api.py80
-rw-r--r--nova/network/rpcapi.py56
-rw-r--r--nova/notifications.py10
-rw-r--r--nova/objectstore/s3server.py7
-rw-r--r--nova/openstack/common/cfg.py464
-rw-r--r--nova/openstack/common/cliutils.py66
-rw-r--r--nova/openstack/common/eventlet_backdoor.py (renamed from nova/common/eventlet_backdoor.py)39
-rw-r--r--nova/openstack/common/gettextutils.py2
-rw-r--r--nova/openstack/common/lockutils.py1
-rw-r--r--nova/openstack/common/log.py19
-rw-r--r--nova/openstack/common/notifier/api.py7
-rw-r--r--nova/openstack/common/notifier/rabbit_notifier.py31
-rw-r--r--nova/openstack/common/notifier/rpc_notifier.py46
-rw-r--r--nova/openstack/common/notifier/rpc_notifier2.py51
-rw-r--r--nova/openstack/common/rpc/__init__.py22
-rw-r--r--nova/openstack/common/rpc/amqp.py56
-rw-r--r--nova/openstack/common/rpc/common.py191
-rw-r--r--nova/openstack/common/rpc/dispatcher.py28
-rw-r--r--nova/openstack/common/rpc/impl_fake.py11
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py42
-rw-r--r--nova/openstack/common/rpc/impl_qpid.py126
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py32
-rw-r--r--nova/openstack/common/rpc/matchmaker.py2
-rw-r--r--nova/openstack/common/rpc/service.py75
-rw-r--r--nova/openstack/common/setup.py16
-rw-r--r--nova/openstack/common/timeutils.py33
-rw-r--r--nova/openstack/common/uuidutils.py4
-rw-r--r--nova/paths.py68
-rw-r--r--nova/policy.py6
-rw-r--r--nova/quota.py291
-rw-r--r--nova/rootwrap/filters.py53
-rw-r--r--nova/rootwrap/wrapper.py101
-rw-r--r--nova/scheduler/baremetal_host_manager.py71
-rw-r--r--nova/scheduler/chance.py8
-rw-r--r--nova/scheduler/driver.py63
-rw-r--r--nova/scheduler/filter_scheduler.py160
-rw-r--r--nova/scheduler/filters/__init__.py76
-rw-r--r--nova/scheduler/filters/availability_zone_filter.py23
-rw-r--r--nova/scheduler/filters/compute_capabilities_filter.py17
-rw-r--r--nova/scheduler/filters/compute_filter.py20
-rw-r--r--nova/scheduler/filters/core_filter.py6
-rw-r--r--nova/scheduler/filters/disk_filter.py8
-rw-r--r--nova/scheduler/filters/image_props_filter.py2
-rw-r--r--nova/scheduler/filters/io_ops_filter.py6
-rw-r--r--nova/scheduler/filters/isolated_hosts_filter.py14
-rw-r--r--nova/scheduler/filters/json_filter.py2
-rw-r--r--nova/scheduler/filters/num_instances_filter.py6
-rw-r--r--nova/scheduler/filters/ram_filter.py6
-rw-r--r--nova/scheduler/filters/retry_filter.py15
-rw-r--r--nova/scheduler/filters/trusted_filter.py4
-rw-r--r--nova/scheduler/host_manager.py177
-rw-r--r--nova/scheduler/least_cost.py118
-rw-r--r--nova/scheduler/manager.py29
-rw-r--r--nova/scheduler/multi.py4
-rw-r--r--nova/scheduler/rpcapi.py28
-rw-r--r--nova/scheduler/scheduler_options.py4
-rw-r--r--nova/scheduler/weights/__init__.py61
-rw-r--r--nova/scheduler/weights/least_cost.py125
-rw-r--r--nova/scheduler/weights/ram.py45
-rw-r--r--nova/service.py169
-rw-r--r--nova/servicegroup/__init__.py22
-rw-r--r--nova/servicegroup/api.py137
-rw-r--r--nova/servicegroup/drivers/__init__.py0
-rw-r--r--nova/servicegroup/drivers/db.py101
-rw-r--r--nova/test.py364
-rw-r--r--nova/tests/__init__.py80
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py131
-rw-r--r--nova/tests/api/ec2/test_cloud.py226
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py9
-rw-r--r--nova/tests/api/ec2/test_faults.py4
-rw-r--r--nova/tests/api/ec2/test_middleware.py29
-rw-r--r--nova/tests/api/openstack/common.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions.py65
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py89
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_agents.py185
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_aggregates.py44
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cells.py396
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe.py15
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py72
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_coverage_ext.py190
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_createserverext.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_deferred_delete.py13
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_disk_config.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py7
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_status.py7
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fixed_ips.py163
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_access.py21
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_manage.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_swap.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py125
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ips.py14
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fping.py94
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py151
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py206
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hypervisors.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py76
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quota_classes.py29
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quotas.py31
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_rescue.py7
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py23
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_password.py86
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_start_stop.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py198
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_snapshots.py3
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_used_limits.py39
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py35
-rw-r--r--nova/tests/api/openstack/compute/extensions/foxinsocks.py2
-rw-r--r--nova/tests/api/openstack/compute/test_consoles.py13
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py33
-rw-r--r--nova/tests/api/openstack/compute/test_flavors.py24
-rw-r--r--nova/tests/api/openstack/compute/test_image_metadata.py9
-rw-r--r--nova/tests/api/openstack/compute/test_images.py43
-rw-r--r--nova/tests/api/openstack/compute/test_limits.py65
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py104
-rw-r--r--nova/tests/api/openstack/compute/test_server_metadata.py35
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py262
-rw-r--r--nova/tests/api/openstack/compute/test_urlmap.py16
-rw-r--r--nova/tests/api/openstack/compute/test_versions.py60
-rw-r--r--nova/tests/api/openstack/fakes.py10
-rw-r--r--nova/tests/api/openstack/test_common.py53
-rw-r--r--nova/tests/api/openstack/test_faults.py12
-rw-r--r--nova/tests/api/openstack/test_wsgi.py4
-rw-r--r--nova/tests/api/test_auth.py6
-rw-r--r--nova/tests/api/test_sizelimit.py6
-rw-r--r--nova/tests/baremetal/__init__.py15
-rw-r--r--nova/tests/baremetal/db/__init__.py15
-rw-r--r--nova/tests/baremetal/db/base.py50
-rw-r--r--nova/tests/baremetal/db/test_bm_interface.py54
-rw-r--r--nova/tests/baremetal/db/test_bm_node.py145
-rw-r--r--nova/tests/baremetal/db/test_bm_pxe_ip.py93
-rw-r--r--nova/tests/baremetal/db/utils.py81
-rw-r--r--nova/tests/baremetal/test_driver.py185
-rw-r--r--nova/tests/baremetal/test_ipmi.py222
-rw-r--r--nova/tests/baremetal/test_nova_baremetal_manage.py49
-rw-r--r--nova/tests/baremetal/test_proxy_bare_metal.py269
-rw-r--r--nova/tests/baremetal/test_pxe.py532
-rw-r--r--nova/tests/baremetal/test_tilera.py84
-rw-r--r--nova/tests/baremetal/test_utils.py (renamed from nova/virt/vif.py)28
-rw-r--r--nova/tests/baremetal/test_volume_driver.py161
-rw-r--r--nova/tests/cells/__init__.py (renamed from nova/tests/declare_flags.py)10
-rw-r--r--nova/tests/cells/fakes.py202
-rw-r--r--nova/tests/cells/test_cells_manager.py239
-rw-r--r--nova/tests/cells/test_cells_messaging.py960
-rw-r--r--nova/tests/cells/test_cells_rpc_driver.py218
-rw-r--r--nova/tests/cells/test_cells_rpcapi.py226
-rw-r--r--nova/tests/cells/test_cells_scheduler.py206
-rw-r--r--nova/tests/cells/test_cells_utils.py82
-rw-r--r--nova/tests/cert/test_rpcapi.py17
-rw-r--r--nova/tests/compute/fake_resource_tracker.py2
-rw-r--r--nova/tests/compute/test_claims.py55
-rw-r--r--nova/tests/compute/test_compute.py2167
-rw-r--r--nova/tests/compute/test_compute_cells.py179
-rw-r--r--nova/tests/compute/test_compute_utils.py54
-rw-r--r--nova/tests/compute/test_host_api.py126
-rw-r--r--nova/tests/compute/test_multiple_nodes.py99
-rw-r--r--nova/tests/compute/test_resource_tracker.py518
-rw-r--r--nova/tests/compute/test_rpcapi.py36
-rw-r--r--nova/tests/compute/test_stats.py4
-rw-r--r--nova/tests/compute/test_virtapi.py138
-rw-r--r--nova/tests/conductor/__init__.py0
-rw-r--r--nova/tests/conductor/test_conductor.py786
-rw-r--r--nova/tests/conf_fixture.py74
-rw-r--r--nova/tests/console/test_console.py27
-rw-r--r--nova/tests/console/test_rpcapi.py49
-rw-r--r--nova/tests/consoleauth/test_consoleauth.py16
-rw-r--r--nova/tests/consoleauth/test_rpcapi.py17
-rw-r--r--nova/tests/db/fakes.py13
-rw-r--r--nova/tests/fake_flags.py49
-rw-r--r--nova/tests/fake_hosts.py32
-rw-r--r--nova/tests/fake_imagebackend.py2
-rw-r--r--nova/tests/fake_libvirt_utils.py21
-rw-r--r--nova/tests/fake_loadables/__init__.py27
-rw-r--r--nova/tests/fake_loadables/fake_loadable1.py44
-rw-r--r--nova/tests/fake_loadables/fake_loadable2.py39
-rw-r--r--nova/tests/fake_network.py25
-rw-r--r--nova/tests/fake_policy.py (renamed from nova/tests/policy.json)34
-rw-r--r--nova/tests/fake_volume.py10
-rw-r--r--nova/tests/fakeguestfs.py140
-rw-r--r--nova/tests/fakelibvirt.py3
-rw-r--r--nova/tests/hyperv/README.rst83
-rw-r--r--nova/tests/hyperv/basetestcase.py15
-rw-r--r--nova/tests/hyperv/db_fakes.py42
-rw-r--r--nova/tests/hyperv/hypervutils.py31
-rw-r--r--nova/tests/hyperv/mockproxy.py48
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gzbin0 -> 278 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gzbin0 -> 603 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gzbin722 -> 724 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gzbin289 -> 300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gzbin2797 -> 2806 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gzbin276 -> 441 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gzbin674 -> 756 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gzbin0 -> 308 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gzbin0 -> 634 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gzbin755 -> 753 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gzbin320 -> 331 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gzbin591 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gzbin290 -> 458 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gzbin658 -> 743 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gzbin22780 -> 21875 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gzbin28844 -> 29013 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gzbin0 -> 280 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gzbin0 -> 607 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gzbin292 -> 303 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gzbin2800 -> 2810 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gzbin275 -> 443 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gzbin592 -> 673 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gzbin19845 -> 19822 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gzbin0 -> 272 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gzbin0 -> 598 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gzbin748 -> 750 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gzbin253 -> 416 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gzbin627 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gzbin24040 -> 23205 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gzbin0 -> 277 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gzbin0 -> 606 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gzbin723 -> 720 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gzbin289 -> 300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gzbin2798 -> 2806 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gzbin275 -> 441 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gzbin671 -> 756 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gzbin29537 -> 29674 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gzbin0 -> 929 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gzbin270 -> 266 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gzbin0 -> 423 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gzbin298 -> 309 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gzbin1013 -> 1465 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gzbin0 -> 415 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gzbin888 -> 1075 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gzbin0 -> 600 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gzbin717 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gzbin284 -> 295 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gzbin254 -> 417 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gzbin626 -> 705 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gzbin23400 -> 22566 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gzbin1300 -> 1071 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gzbin0 -> 279 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gzbin0 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gzbin603 -> 601 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gzbin260 -> 424 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gzbin631 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gzbin0 -> 299 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gzbin0 -> 625 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gzbin621 -> 621 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gzbin310 -> 321 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gzbin280 -> 441 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gzbin649 -> 732 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gzbin23876 -> 23107 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gzbin26172 -> 25350 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gzbin0 -> 285 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gzbin0 -> 612 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gzbin728 -> 731 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gzbin296 -> 307 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gzbin266 -> 429 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gzbin638 -> 720 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gzbin23490 -> 22768 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gzbin0 -> 269 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gzbin0 -> 597 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gzbin716 -> 715 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gzbin281 -> 292 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gzbin251 -> 414 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gzbin624 -> 708 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gzbin23350 -> 22557 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gzbin0 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gzbin0 -> 620 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gzbin740 -> 738 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gzbin305 -> 316 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gzbin275 -> 437 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gzbin646 -> 730 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gzbin23323 -> 22712 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gzbin719 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gzbin285 -> 296 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gzbin255 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gzbin625 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gzbin23258 -> 22466 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gzbin0 -> 287 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gzbin0 -> 617 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gzbin734 -> 735 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gzbin300 -> 311 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gzbin270 -> 434 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gzbin640 -> 726 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gzbin23305 -> 22540 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gzbin0 -> 600 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gzbin718 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gzbin284 -> 295 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gzbin254 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gzbin626 -> 707 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gzbin23962 -> 22780 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gzbin0 -> 343 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gzbin536 -> 536 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gzbin304 -> 315 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gzbin273 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gzbin335 -> 386 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gzbin1382 -> 1642 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gzbin0 -> 345 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gzbin307 -> 318 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gzbin337 -> 388 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gzbin849 -> 1073 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gzbin0 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gzbin0 -> 597 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gzbin717 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gzbin282 -> 293 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gzbin252 -> 416 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gzbin623 -> 709 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gzbin23931 -> 22819 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gzbin0 -> 287 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gzbin0 -> 613 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gzbin733 -> 732 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gzbin298 -> 309 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gzbin268 -> 432 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gzbin640 -> 723 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gzbin23341 -> 22530 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gzbin0 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gzbin0 -> 594 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gzbin716 -> 715 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gzbin282 -> 293 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gzbin252 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gzbin623 -> 707 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gzbin24291 -> 23017 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gzbin0 -> 272 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gzbin0 -> 600 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gzbin1012 -> 1012 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gzbin416 -> 433 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gzbin254 -> 419 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gzbin667 -> 750 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gzbin0 -> 292 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gzbin0 -> 619 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gzbin1033 -> 1034 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gzbin437 -> 458 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gzbin274 -> 439 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gzbin688 -> 773 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gzbin24794 -> 23801 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gzbin24505 -> 23695 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gzbin0 -> 289 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gzbin0 -> 828 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gzbin0 -> 890 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gzbin0 -> 311 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gzbin0 -> 432 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gzbin0 -> 811 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gzbin0 -> 30294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gzbin0 -> 493 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gzbin0 -> 822 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gzbin0 -> 913 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gzbin0 -> 305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gzbin0 -> 427 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gzbin0 -> 804 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gzbin0 -> 29760 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gzbin0 -> 280 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gzbin0 -> 607 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gzbin724 -> 725 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gzbin291 -> 302 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gzbin261 -> 424 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gzbin631 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gzbin24716 -> 23822 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gzbin0 -> 285 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gzbin0 -> 616 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gzbin0 -> 731 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gzbin0 -> 308 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gzbin0 -> 430 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gzbin0 -> 721 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gzbin0 -> 23138 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gzbin0 -> 283 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gzbin0 -> 608 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gzbin607 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gzbin294 -> 305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gzbin264 -> 426 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gzbin635 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gzbin24420 -> 23617 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gzbin0 -> 291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gzbin0 -> 618 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gzbin737 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gzbin302 -> 313 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gzbin271 -> 430 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gzbin558 -> 725 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gzbin17307 -> 21340 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gzbin0 -> 291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gzbin0 -> 616 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gzbin734 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gzbin301 -> 312 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gzbin271 -> 433 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gzbin643 -> 729 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gzbin24133 -> 22722 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gzbin0 -> 271 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gzbin0 -> 598 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gzbin717 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gzbin253 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gzbin623 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gzbin23864 -> 22741 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gzbin0 -> 288 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gzbin0 -> 614 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gzbin735 -> 732 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gzbin299 -> 310 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gzbin269 -> 432 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gzbin640 -> 724 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gzbin23690 -> 22524 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gzbin0 -> 272 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gzbin717 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gzbin253 -> 416 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gzbin626 -> 707 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gzbin24099 -> 22903 bytes
-rw-r--r--nova/tests/image/fake.py37
-rw-r--r--nova/tests/image/test_glance.py32
-rw-r--r--nova/tests/image/test_s3.py3
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl34
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl12
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl214
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl71
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl57
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl21
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl56
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl55
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl17
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl17
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl55
-rw-r--r--nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl24
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl58
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl21
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl94
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl23
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl22
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl22
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/README.rst2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl98
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl38
-rw-r--r--nova/tests/integrated/api_samples/limit-get-resp.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/limit-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl12
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl13
-rw-r--r--nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl39
-rw-r--r--nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl94
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl23
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl94
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl23
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl20
-rw-r--r--nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl25
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl24
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl54
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl56
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl21
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl31
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl24
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl34
-rw-r--r--nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl1
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/integrated_helpers.py15
-rw-r--r--nova/tests/integrated/test_api_samples.py1279
-rw-r--r--nova/tests/integrated/test_extensions.py10
-rw-r--r--nova/tests/integrated/test_login.py2
-rw-r--r--nova/tests/integrated/test_multiprocess_api.py56
-rw-r--r--nova/tests/integrated/test_servers.py33
-rw-r--r--nova/tests/integrated/test_xml.py2
-rw-r--r--nova/tests/matchers.py454
-rw-r--r--nova/tests/monkey_patch_example/__init__.py2
-rw-r--r--nova/tests/network/test_api.py43
-rw-r--r--nova/tests/network/test_linux_net.py131
-rw-r--r--nova/tests/network/test_manager.py301
-rw-r--r--nova/tests/network/test_network_info.py26
-rw-r--r--nova/tests/network/test_quantumv2.py68
-rw-r--r--nova/tests/network/test_rpcapi.py43
-rw-r--r--nova/tests/policy_fixture.py44
-rw-r--r--nova/tests/scheduler/fakes.py30
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py101
-rw-r--r--nova/tests/scheduler/test_host_filters.py329
-rw-r--r--nova/tests/scheduler/test_host_manager.py334
-rw-r--r--nova/tests/scheduler/test_least_cost.py113
-rw-r--r--nova/tests/scheduler/test_multi_scheduler.py2
-rw-r--r--nova/tests/scheduler/test_rpcapi.py17
-rw-r--r--nova/tests/scheduler/test_scheduler.py280
-rw-r--r--nova/tests/scheduler/test_weights.py117
-rw-r--r--nova/tests/servicegroup/__init__.py13
-rw-r--r--nova/tests/servicegroup/test_db_servicegroup.py143
-rw-r--r--nova/tests/test_api.py35
-rw-r--r--nova/tests/test_bdm.py21
-rw-r--r--nova/tests/test_cinder.py44
-rw-r--r--nova/tests/test_configdrive2.py30
-rw-r--r--nova/tests/test_crypto.py3
-rw-r--r--nova/tests/test_db_api.py1044
-rw-r--r--nova/tests/test_exception.py16
-rw-r--r--nova/tests/test_filters.py125
-rw-r--r--nova/tests/test_flags.py92
-rw-r--r--nova/tests/test_hooks.py87
-rw-r--r--nova/tests/test_hypervapi.py140
-rw-r--r--nova/tests/test_image_utils.py103
-rw-r--r--nova/tests/test_imagebackend.py52
-rw-r--r--nova/tests/test_imagecache.py132
-rw-r--r--nova/tests/test_instance_types.py71
-rw-r--r--nova/tests/test_libvirt.py908
-rw-r--r--nova/tests/test_libvirt_config.py128
-rw-r--r--nova/tests/test_libvirt_utils.py42
-rw-r--r--nova/tests/test_libvirt_vif.py143
-rw-r--r--nova/tests/test_loadables.py113
-rw-r--r--nova/tests/test_matchers.py351
-rw-r--r--nova/tests/test_metadata.py173
-rw-r--r--nova/tests/test_migrations.py343
-rw-r--r--nova/tests/test_misc.py7
-rw-r--r--nova/tests/test_notifications.py16
-rw-r--r--nova/tests/test_nova_manage.py4
-rw-r--r--nova/tests/test_nova_rootwrap.py139
-rw-r--r--nova/tests/test_objectstore.py26
-rw-r--r--nova/tests/test_periodic_tasks.py123
-rw-r--r--nova/tests/test_pipelib.py9
-rw-r--r--nova/tests/test_plugin_api_extensions.py2
-rw-r--r--nova/tests/test_policy.py34
-rw-r--r--nova/tests/test_powervm.py94
-rw-r--r--nova/tests/test_quota.py220
-rw-r--r--nova/tests/test_service.py67
-rw-r--r--nova/tests/test_sqlalchemy.py66
-rw-r--r--nova/tests/test_test.py8
-rw-r--r--nova/tests/test_test_utils.py6
-rw-r--r--nova/tests/test_utils.py116
-rw-r--r--nova/tests/test_versions.py52
-rw-r--r--nova/tests/test_virt.py71
-rw-r--r--nova/tests/test_virt_disk.py198
-rw-r--r--nova/tests/test_virt_disk_vfs_guestfs.py176
-rw-r--r--nova/tests/test_virt_disk_vfs_localfs.py353
-rw-r--r--nova/tests/test_virt_drivers.py61
-rw-r--r--nova/tests/test_vmwareapi.py38
-rw-r--r--nova/tests/test_wsgi.py17
-rw-r--r--nova/tests/test_xenapi.py472
-rw-r--r--nova/tests/utils.py35
-rw-r--r--nova/tests/virt/__init__.py0
-rw-r--r--nova/tests/virt/disk/__init__.py (renamed from nova/tests/runtime_flags.py)9
-rw-r--r--nova/tests/virt/disk/test_api.py60
-rw-r--r--nova/tests/virt/disk/test_loop.py100
-rw-r--r--nova/tests/virt/disk/test_nbd.py292
-rw-r--r--nova/tests/virt/xenapi/__init__.py0
-rw-r--r--nova/tests/virt/xenapi/test_vm_utils.py89
-rw-r--r--nova/tests/virt/xenapi/test_volumeops.py150
-rw-r--r--nova/tests/vmwareapi/db_fakes.py3
-rw-r--r--nova/tests/vmwareapi/stubs.py14
-rw-r--r--nova/tests/xenapi/stubs.py18
-rw-r--r--nova/tests/xenapi/test_vm_utils.py107
-rw-r--r--nova/utils.py201
-rw-r--r--nova/version.py73
-rw-r--r--nova/virt/baremetal/__init__.py7
-rw-r--r--nova/virt/baremetal/baremetal_states.py32
-rw-r--r--nova/virt/baremetal/base.py78
-rw-r--r--nova/virt/baremetal/db/__init__.py16
-rw-r--r--nova/virt/baremetal/db/api.py181
-rw-r--r--nova/virt/baremetal/db/migration.py38
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/api.py419
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg20
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py123
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migration.py113
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/models.py77
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/session.py64
-rw-r--r--nova/virt/baremetal/doc/README.rst69
-rw-r--r--nova/virt/baremetal/dom.py264
-rw-r--r--nova/virt/baremetal/driver.py1035
-rw-r--r--nova/virt/baremetal/fake.py155
-rw-r--r--nova/virt/baremetal/ipmi.py257
-rw-r--r--nova/virt/baremetal/net-dhcp.ubuntu.template21
-rw-r--r--nova/virt/baremetal/net-static.ubuntu.template30
-rw-r--r--nova/virt/baremetal/nodes.py42
-rw-r--r--nova/virt/baremetal/pxe.py449
-rw-r--r--nova/virt/baremetal/pxe_config.template11
-rw-r--r--nova/virt/baremetal/tilera.py365
-rw-r--r--nova/virt/baremetal/utils.py67
-rw-r--r--nova/virt/baremetal/vif_driver.py74
-rw-r--r--nova/virt/baremetal/volume_driver.py267
-rw-r--r--nova/virt/configdrive.py58
-rw-r--r--nova/virt/disk/api.py335
-rw-r--r--nova/virt/disk/guestfs.py121
-rw-r--r--nova/virt/disk/mount/__init__.py19
-rw-r--r--nova/virt/disk/mount/api.py (renamed from nova/virt/disk/mount.py)66
-rw-r--r--nova/virt/disk/mount/loop.py (renamed from nova/virt/disk/loop.py)32
-rw-r--r--nova/virt/disk/mount/nbd.py128
-rw-r--r--nova/virt/disk/nbd.py112
-rw-r--r--nova/virt/disk/vfs/__init__.py19
-rw-r--r--nova/virt/disk/vfs/api.py132
-rw-r--r--nova/virt/disk/vfs/guestfs.py196
-rw-r--r--nova/virt/disk/vfs/localfs.py158
-rw-r--r--nova/virt/driver.py213
-rw-r--r--nova/virt/fake.py143
-rw-r--r--nova/virt/firewall.py63
-rw-r--r--nova/virt/hyperv/baseops.py8
-rw-r--r--nova/virt/hyperv/basevolumeutils.py80
-rw-r--r--nova/virt/hyperv/constants.py27
-rw-r--r--nova/virt/hyperv/driver.py46
-rw-r--r--nova/virt/hyperv/hostops.py87
-rw-r--r--nova/virt/hyperv/livemigrationops.py6
-rw-r--r--nova/virt/hyperv/snapshotops.py12
-rw-r--r--nova/virt/hyperv/vif.py133
-rw-r--r--nova/virt/hyperv/vmops.py352
-rw-r--r--nova/virt/hyperv/vmutils.py60
-rw-r--r--nova/virt/hyperv/volumeops.py42
-rw-r--r--nova/virt/hyperv/volumeutils.py82
-rw-r--r--nova/virt/hyperv/volumeutilsV2.py70
-rw-r--r--nova/virt/images.py9
-rw-r--r--nova/virt/libvirt/config.py133
-rw-r--r--nova/virt/libvirt/designer.py101
-rw-r--r--nova/virt/libvirt/driver.py744
-rw-r--r--nova/virt/libvirt/firewall.py28
-rw-r--r--nova/virt/libvirt/imagebackend.py29
-rw-r--r--nova/virt/libvirt/imagecache.py272
-rw-r--r--nova/virt/libvirt/snapshots.py4
-rw-r--r--nova/virt/libvirt/utils.py214
-rw-r--r--nova/virt/libvirt/vif.py247
-rw-r--r--nova/virt/libvirt/volume.py27
-rw-r--r--nova/virt/libvirt/volume_nfs.py21
-rw-r--r--nova/virt/netutils.py8
-rw-r--r--nova/virt/powervm/blockdev.py425
-rw-r--r--nova/virt/powervm/common.py48
-rw-r--r--nova/virt/powervm/constants.py4
-rw-r--r--nova/virt/powervm/driver.py70
-rw-r--r--nova/virt/powervm/exception.py6
-rw-r--r--nova/virt/powervm/operator.py266
-rw-r--r--nova/virt/storage_users.py63
-rw-r--r--nova/virt/virtapi.py61
-rw-r--r--nova/virt/vmwareapi/__init__.py2
-rw-r--r--nova/virt/vmwareapi/driver.py74
-rw-r--r--nova/virt/vmwareapi/fake.py2
-rw-r--r--nova/virt/vmwareapi/network_util.py (renamed from nova/virt/vmwareapi/network_utils.py)0
-rw-r--r--nova/virt/vmwareapi/read_write_util.py13
-rw-r--r--nova/virt/vmwareapi/vif.py106
-rw-r--r--nova/virt/vmwareapi/vim.py127
-rw-r--r--nova/virt/vmwareapi/vm_util.py2
-rw-r--r--nova/virt/vmwareapi/vmops.py85
-rw-r--r--nova/virt/vmwareapi/vmware_images.py10
-rw-r--r--nova/virt/xenapi/agent.py8
-rw-r--r--nova/virt/xenapi/driver.py172
-rw-r--r--nova/virt/xenapi/fake.py15
-rw-r--r--nova/virt/xenapi/firewall.py24
-rw-r--r--nova/virt/xenapi/host.py57
-rw-r--r--nova/virt/xenapi/pool.py95
-rw-r--r--nova/virt/xenapi/pool_states.py6
-rw-r--r--nova/virt/xenapi/vif.py9
-rw-r--r--nova/virt/xenapi/vm_utils.py264
-rw-r--r--nova/virt/xenapi/vmops.py202
-rw-r--r--nova/virt/xenapi/volume_utils.py80
-rw-r--r--nova/virt/xenapi/volumeops.py141
-rw-r--r--nova/vnc/__init__.py6
-rw-r--r--nova/vnc/xvp_proxy.py6
-rw-r--r--nova/volume/__init__.py15
-rw-r--r--nova/volume/cinder.py49
-rw-r--r--nova/volume/driver.py954
-rw-r--r--nova/volume/iscsi.py235
-rw-r--r--nova/weights.py71
-rw-r--r--nova/wsgi.py26
-rw-r--r--openstack-common.conf2
-rw-r--r--plugins/xenserver/networking/etc/xensource/scripts/novalib.py4
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py2
-rwxr-xr-xrun_tests.sh103
-rw-r--r--setup.cfg7
-rw-r--r--setup.py4
-rw-r--r--smoketests/base.py20
-rw-r--r--smoketests/test_netadmin.py2
-rw-r--r--smoketests/test_sysadmin.py18
-rw-r--r--tools/conf/extract_opts.py146
-rwxr-xr-xtools/db/schema_diff.py2
-rw-r--r--tools/flakes.py22
-rwxr-xr-xtools/hacking.py82
-rw-r--r--tools/install_venv.py3
-rwxr-xr-xtools/lintstack.py2
-rwxr-xr-xtools/lintstack.sh26
-rw-r--r--tools/pip-requires13
-rw-r--r--tools/test-requires15
-rw-r--r--tools/xenserver/destroy_cached_images.py3
-rwxr-xr-xtools/xenserver/vm_vdi_cleaner.py19
-rw-r--r--tox.ini32
1346 files changed, 55474 insertions, 26064 deletions
diff --git a/.gitignore b/.gitignore
index 51ab4183a..efb88c781 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,12 +6,14 @@
*.swo
*.swp
*.sqlite
+*~
.autogenerated
.coverage
.nova-venv
.project
.pydevproject
.ropeproject
+.testrepository/
.tox
.venv
AUTHORS
diff --git a/.mailmap b/.mailmap
index ac6f75f88..f619a4bf0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -37,7 +37,8 @@
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
<johannes.erdfelt@rackspace.com> <johannes@compute3.221.st>
-<josh@jk0.org> <josh.kearney@rackspace.com>
+<josh.kearney@pistoncloud.com> <josh.kearney@rackspace.com>
+<josh.kearney@pistoncloud.com> <josh@jk0.org>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<kshileev@gmail.com> <kshileev@griddynamics.com>
<lorin@nimbisservices.com> <lorin@isi.edu>
@@ -89,6 +90,8 @@ Dan Wendlandt <dan@nicira.com> danwent@gmail.com <dan@nicira.com>
Jake Dahn <jake@ansolabs.com> jakedahn <jake@ansolabs.com>
Jason Koelker <jason@koelker.net> Jason Kölker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com> jaypipes@gmail.com <>
+Jian Wen <jian.wen@canonical.com> <jian.wen@ubuntu.com>
+Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
Joel Moore <joelbm24@gmail.com> Joel Moore joelbm24@gmail.com <>
John Griffith <john.griffith@solidfire.com> john-griffith <john.griffith@solidfire.com>
John Tran <jtran@attinteractive.com> John Tran <jhtran@att.com>
@@ -116,4 +119,6 @@ Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com> sateesh <sateesh.chodapun
Vishvananda Ishaya <vishvananda@gmail.com> <root@mirror.nasanebula.net>
Vishvananda Ishaya <vishvananda@gmail.com> <root@ubuntu>
Vivek YS <vivek.ys@gmail.com> Vivek YS vivek.ys@gmail.com <>
+Yaguang Tang <yaguang.tang@canonical.com> <heut2008@gmail.com>
Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
+Ethan Chu <xychu2008@gmail.com> <xchu@redhat.com>
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 000000000..1036ba0ca
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ ./nova/tests $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 000000000..d1ebd5eca
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in the "If you're a developer, start here"
+section of this page:
+
+ http://wiki.openstack.org/HowToContribute
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+ http://wiki.openstack.org/GerritWorkflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+ https://bugs.launchpad.net/nova
diff --git a/HACKING.rst b/HACKING.rst
index 52dc38bf8..be894f072 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -33,6 +33,7 @@ Imports
- Do not import objects, only modules (*)
- Do not import more than one module per line (*)
- Do not make relative imports
+- Do not make new nova.db imports in nova/virt/*
- Order your imports by the full module path
- Organize your imports according to the following template
@@ -217,7 +218,7 @@ the changes across.
OpenStack Trademark
-------------------
-OpenStack is a registered trademark of OpenStack, LLC, and uses the
+OpenStack is a registered trademark of the OpenStack Foundation, and uses the
following capitalization:
OpenStack
diff --git a/README.rst b/README.rst
index 0e2a216f0..43f6b9d51 100644
--- a/README.rst
+++ b/README.rst
@@ -50,9 +50,7 @@ developer focused documentation is available at:
http://nova.openstack.org/
-Changes to OpenStack Nova should be submitted for review via
-the Gerrit tool, following the workflow documented at:
-
- http://wiki.openstack.org/GerritWorkflow
+For information on how to contribute to Nova, please see the
+contents of the CONTRIBUTING.rst file.
-- End of broadcast
diff --git a/bin/nova-all b/bin/nova-all
index 531116d69..2553f6487 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -41,15 +41,16 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.objectstore import s3server
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('enabled_apis', 'nova.service')
LOG = logging.getLogger('nova.all')
if __name__ == '__main__':
diff --git a/bin/nova-api b/bin/nova-api
index 776152e43..8457ea43d 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -37,12 +37,13 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('enabled_apis', 'nova.service')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2
index f165b5ce9..c7b08845d 100755
--- a/bin/nova-api-ec2
+++ b/bin/nova-api-ec2
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api-metadata b/bin/nova-api-metadata
index f50e5ce84..e7cac260d 100755
--- a/bin/nova-api-metadata
+++ b/bin/nova-api-metadata
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api-os-compute b/bin/nova-api-os-compute
index 5cf5f6910..02f16a04a 100755
--- a/bin/nova-api-os-compute
+++ b/bin/nova-api-os-compute
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-baremetal-deploy-helper b/bin/nova-baremetal-deploy-helper
new file mode 100755
index 000000000..f8a487d37
--- /dev/null
+++ b/bin/nova-baremetal-deploy-helper
@@ -0,0 +1,318 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Bare-Metal Deployment Service."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+import threading
+import time
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+import cgi
+import Queue
+import re
+import socket
+import stat
+from wsgiref import simple_server
+
+from nova import config
+from nova import context as nova_context
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import db
+
+
+LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
+
+QUEUE = Queue.Queue()
+
+
+# All functions are called from deploy() directly or indirectly.
+# They are split for stub-out.
+
+def discovery(portal_address, portal_port):
+ """Do iSCSI discovery on portal."""
+ utils.execute('iscsiadm',
+ '-m', 'discovery',
+ '-t', 'st',
+ '-p', '%s:%s' % (portal_address, portal_port),
+ run_as_root=True,
+ check_exit_code=[0])
+
+
+def login_iscsi(portal_address, portal_port, target_iqn):
+ """Login to an iSCSI target."""
+ utils.execute('iscsiadm',
+ '-m', 'node',
+ '-p', '%s:%s' % (portal_address, portal_port),
+ '-T', target_iqn,
+ '--login',
+ run_as_root=True,
+ check_exit_code=[0])
+ # Ensure the login complete
+ time.sleep(3)
+
+
+def logout_iscsi(portal_address, portal_port, target_iqn):
+ """Logout from an iSCSI target."""
+ utils.execute('iscsiadm',
+ '-m', 'node',
+ '-p', '%s:%s' % (portal_address, portal_port),
+ '-T', target_iqn,
+ '--logout',
+ run_as_root=True,
+ check_exit_code=[0])
+
+
+def make_partitions(dev, root_mb, swap_mb):
+ """Create partitions for root and swap on a disk device."""
+ commands = ['o,w',
+ 'n,p,1,,+%dM,t,1,83,w' % root_mb,
+ 'n,p,2,,+%dM,t,2,82,w' % swap_mb,
+ ]
+ for command in commands:
+ command = command.replace(',', '\n')
+ utils.execute('fdisk', dev,
+ process_input=command,
+ run_as_root=True,
+ check_exit_code=[0])
+ # avoid "device is busy"
+ time.sleep(3)
+
+
+def is_block_device(dev):
+ """Check whether a device is block or not."""
+ s = os.stat(dev)
+ return stat.S_ISBLK(s.st_mode)
+
+
+def dd(src, dst):
+ """Execute dd from src to dst."""
+ utils.execute('dd',
+ 'if=%s' % src,
+ 'of=%s' % dst,
+ 'bs=1M',
+ run_as_root=True,
+ check_exit_code=[0])
+
+
+def mkswap(dev, label='swap1'):
+ """Execute mkswap on a device."""
+ utils.execute('mkswap',
+ '-L', label,
+ dev,
+ run_as_root=True,
+ check_exit_code=[0])
+
+
+def block_uuid(dev):
+ """Get UUID of a block device."""
+ out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
+ run_as_root=True,
+ check_exit_code=[0])
+ return out.strip()
+
+
+def switch_pxe_config(path, root_uuid):
+ """Switch a pxe config from deployment mode to service mode."""
+ with open(path) as f:
+ lines = f.readlines()
+ root = 'UUID=%s' % root_uuid
+ rre = re.compile(r'\$\{ROOT\}')
+ dre = re.compile('^default .*$')
+ with open(path, 'w') as f:
+ for line in lines:
+ line = rre.sub(root, line)
+ line = dre.sub('default boot', line)
+ f.write(line)
+
+
+def notify(address, port):
+ """Notify a node that it becomes ready to reboot."""
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ s.connect((address, port))
+ s.send('done')
+ finally:
+ s.close()
+
+
+def get_dev(address, port, iqn, lun):
+ """Returns a device path for given parameters."""
+ dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \
+ % (address, port, iqn, lun)
+ return dev
+
+
+def get_image_mb(image_path):
+ """Get size of an image in Megabyte."""
+ mb = 1024 * 1024
+ image_byte = os.path.getsize(image_path)
+ # round up size to MB
+ image_mb = int((image_byte + mb - 1) / mb)
+ return image_mb
+
+
+def work_on_disk(dev, root_mb, swap_mb, image_path):
+ """Creates partitions and write an image to the root partition."""
+ root_part = "%s-part1" % dev
+ swap_part = "%s-part2" % dev
+
+ if not is_block_device(dev):
+ LOG.warn("parent device '%s' not found", dev)
+ return
+ make_partitions(dev, root_mb, swap_mb)
+ if not is_block_device(root_part):
+ LOG.warn("root device '%s' not found", root_part)
+ return
+ if not is_block_device(swap_part):
+ LOG.warn("swap device '%s' not found", swap_part)
+ return
+ dd(image_path, root_part)
+ mkswap(swap_part)
+ root_uuid = block_uuid(root_part)
+ return root_uuid
+
+
+def deploy(address, port, iqn, lun, image_path, pxe_config_path,
+ root_mb, swap_mb):
+ """All-in-one function to deploy a node."""
+ dev = get_dev(address, port, iqn, lun)
+ image_mb = get_image_mb(image_path)
+ if image_mb > root_mb:
+ root_mb = image_mb
+ discovery(address, port)
+ login_iscsi(address, port, iqn)
+ try:
+ root_uuid = work_on_disk(dev, root_mb, swap_mb, image_path)
+ finally:
+ logout_iscsi(address, port, iqn)
+ switch_pxe_config(pxe_config_path, root_uuid)
+ # Ensure the node started netcat on the port after POST the request.
+ time.sleep(3)
+ notify(address, 10000)
+
+
+class Worker(threading.Thread):
+ """Thread that handles requests in queue."""
+
+ def __init__(self):
+ super(Worker, self).__init__()
+ self.setDaemon(True)
+ self.stop = False
+ self.queue_timeout = 1
+
+ def run(self):
+ while not self.stop:
+ try:
+ # Set timeout to check self.stop periodically
+ (deployment_id, params) = QUEUE.get(block=True,
+ timeout=self.queue_timeout)
+ except Queue.Empty:
+ pass
+ else:
+ # Requests comes here from BareMetalDeploy.post()
+ LOG.info("start deployment: %s, %s", deployment_id, params)
+ try:
+ deploy(**params)
+ except Exception:
+ LOG.exception('deployment %s failed' % deployment_id)
+ else:
+ LOG.info("deployment %s done", deployment_id)
+ finally:
+ context = nova_context.get_admin_context()
+ db.bm_deployment_destroy(context, deployment_id)
+
+
+class BareMetalDeploy(object):
+ """WSGI server for bare-metal deployment."""
+
+ def __init__(self):
+ self.worker = Worker()
+ self.worker.start()
+
+ def __call__(self, environ, start_response):
+ method = environ['REQUEST_METHOD']
+ if method == 'POST':
+ return self.post(environ, start_response)
+ else:
+ start_response('501 Not Implemented',
+ [('Content-type', 'text/plain')])
+ return 'Not Implemented'
+
+ def post(self, environ, start_response):
+ LOG.info("post: environ=%s", environ)
+ inpt = environ['wsgi.input']
+ length = int(environ.get('CONTENT_LENGTH', 0))
+
+ x = inpt.read(length)
+ q = dict(cgi.parse_qsl(x))
+ try:
+ deployment_id = q['i']
+ deployment_key = q['k']
+ address = q['a']
+ port = q.get('p', '3260')
+ iqn = q['n']
+ lun = q.get('l', '1')
+ except KeyError as e:
+ start_response('400 Bad Request', [('Content-type', 'text/plain')])
+ return "parameter '%s' is not defined" % e
+
+ context = nova_context.get_admin_context()
+ d = db.bm_deployment_get(context, deployment_id)
+
+ if d['key'] != deployment_key:
+ start_response('400 Bad Request', [('Content-type', 'text/plain')])
+ return 'key is not match'
+
+ params = {'address': address,
+ 'port': port,
+ 'iqn': iqn,
+ 'lun': lun,
+ 'image_path': d['image_path'],
+ 'pxe_config_path': d['pxe_config_path'],
+ 'root_mb': int(d['root_mb']),
+ 'swap_mb': int(d['swap_mb']),
+ }
+ # Restart worker, if needed
+ if not self.worker.isAlive():
+ self.worker = Worker()
+ self.worker.start()
+ LOG.info("request is queued: %s, %s", deployment_id, params)
+ QUEUE.put((deployment_id, params))
+ # Requests go to Worker.run()
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ return ''
+
+
+if __name__ == '__main__':
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ app = BareMetalDeploy()
+ srv = simple_server.make_server('', 10000, app)
+ srv.serve_forever()
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
new file mode 100755
index 000000000..34a98caf2
--- /dev/null
+++ b/bin/nova-baremetal-manage
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Interactive shell based on Django:
+#
+# Copyright (c) 2005, the Lawrence Journal-World
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of Django nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+ CLI interface for nova bare-metal management.
+"""
+
+import ast
+import errno
+import gettext
+import math
+import netaddr
+import optparse
+import os
+import sys
+
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('nova', unicode=1)
+
+from nova import config
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import cliutils
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+from nova.openstack.common import timeutils
+from nova import utils
+from nova import version
+from nova.virt.baremetal import db as bmdb
+from nova.virt.baremetal.db import migration as bmdb_migration
+
+CONF = cfg.CONF
+
+
+# Decorators for actions
+def args(*args, **kwargs):
+ def _decorator(func):
+ func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
+ return func
+ return _decorator
+
+
+class BareMetalDbCommands(object):
+ """Class for managing the bare-metal database."""
+
+ def __init__(self):
+ pass
+
+ @args('--version', dest='version', metavar='<version>',
+ help='Bare-metal Database version')
+ def sync(self, version=None):
+ """Sync the database up to the most recent version."""
+ bmdb_migration.db_sync(version)
+
+ def version(self):
+ """Print the current database version."""
+ v = bmdb_migration.db_version()
+ print(v)
+ # return for unittest
+ return v
+
+
+CATEGORIES = {
+ 'db': BareMetalDbCommands,
+}
+
+
+def methods_of(obj):
+ """Get all callable methods of an object that don't start with underscore
+ returns a list of tuples of the form (method_name, method)"""
+ result = []
+ for i in dir(obj):
+ if callable(getattr(obj, i)) and not i.startswith('_'):
+ result.append((i, getattr(obj, i)))
+ return result
+
+
+def add_command_parsers(subparsers):
+ parser = subparsers.add_parser('bash-completion')
+ parser.add_argument('query_category', nargs='?')
+
+ for category in CATEGORIES:
+ command_object = CATEGORIES[category]()
+
+ parser = subparsers.add_parser(category)
+ parser.set_defaults(command_object=command_object)
+
+ category_subparsers = parser.add_subparsers(dest='action')
+
+ for (action, action_fn) in methods_of(command_object):
+ parser = category_subparsers.add_parser(action)
+
+ action_kwargs = []
+ for args, kwargs in getattr(action_fn, 'args', []):
+ action_kwargs.append(kwargs['dest'])
+ kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
+ parser.add_argument(*args, **kwargs)
+
+ parser.set_defaults(action_fn=action_fn)
+ parser.set_defaults(action_kwargs=action_kwargs)
+
+ parser.add_argument('action_args', nargs='*')
+
+
+category_opt = cfg.SubCommandOpt('category',
+ title='Command categories',
+ help='Available categories',
+ handler=add_command_parsers)
+
+
+def main():
+ """Parse options and call the appropriate class/method."""
+ CONF.register_cli_opt(category_opt)
+ try:
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ except cfg.ConfigFilesNotFoundError:
+ cfgfile = CONF.config_file[-1] if CONF.config_file else None
+ if cfgfile and not os.access(cfgfile, os.R_OK):
+ st = os.stat(cfgfile)
+ print(_("Could not read %s. Re-running with sudo") % cfgfile)
+ try:
+ os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
+ except Exception:
+ print(_('sudo failed, continuing as if nothing happened'))
+
+ print(_('Please re-run nova-manage as root.'))
+ sys.exit(2)
+
+ if CONF.category.name == "version":
+ print(_("%(version)s (%(vcs)s)") %
+ {'version': version.version_string(),
+ 'vcs': version.version_string_with_vcs()})
+ sys.exit(0)
+
+ if CONF.category.name == "bash-completion":
+ if not CONF.category.query_category:
+ print(" ".join(CATEGORIES.keys()))
+ elif CONF.category.query_category in CATEGORIES:
+ fn = CATEGORIES[CONF.category.query_category]
+ command_object = fn()
+ actions = methods_of(command_object)
+ print(" ".join([k for (k, v) in actions]))
+ sys.exit(0)
+
+ fn = CONF.category.action_fn
+ fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
+ fn_kwargs = {}
+ for k in CONF.category.action_kwargs:
+ v = getattr(CONF.category, 'action_kwarg_' + k)
+ if v is None:
+ continue
+ if isinstance(v, basestring):
+ v = v.decode('utf-8')
+ fn_kwargs[k] = v
+
+ # call the action with the remaining arguments
+ # check arguments
+ try:
+ cliutils.validate_args(fn, *fn_args, **fn_kwargs)
+ except cliutils.MissingArgs as e:
+ print(fn.__doc__)
+ parser.print_help()
+ print(e)
+ sys.exit(1)
+ try:
+ fn(*fn_args, **fn_kwargs)
+ sys.exit(0)
+ except Exception:
+ print(_("Command failed, please check log for more info"))
+ raise
+
+
+if __name__ == '__main__':
+ main()
diff --git a/bin/nova-cells b/bin/nova-cells
new file mode 100755
index 000000000..a7e16ef53
--- /dev/null
+++ b/bin/nova-cells
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Cells Service."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+from nova import config
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import service
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('topic', 'nova.cells.opts', group='cells')
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
+
+if __name__ == '__main__':
+ config.parse_args(sys.argv)
+ logging.setup('nova')
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-cells',
+ topic=CONF.cells.topic,
+ manager=CONF.cells.manager)
+ service.serve(server)
+ service.wait()
diff --git a/bin/nova-cert b/bin/nova-cert
index 441bda9e5..113b06510 100755
--- a/bin/nova-cert
+++ b/bin/nova-cert
@@ -33,12 +33,13 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('cert_topic', 'nova.cert.rpcapi')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index be1d98e3e..618aa4587 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -40,21 +40,26 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import config
from nova import context
from nova import exception
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
-delete_exchange_opt = cfg.BoolOpt('delete_exchange',
- default=False,
- help='delete nova exchange too.')
+opts = [
+ cfg.MultiStrOpt('queues',
+ default=[],
+ positional=True,
+ help='Queues to delete'),
+ cfg.BoolOpt('delete_exchange',
+ default=False,
+ help='delete nova exchange too.'),
+]
-CONF = config.CONF
-CONF.register_cli_opt(delete_exchange_opt)
+CONF = cfg.CONF
+CONF.register_cli_opts(opts)
def delete_exchange(exch):
@@ -70,8 +75,8 @@ def delete_queues(queues):
x.queue_delete(q)
if __name__ == '__main__':
- args = config.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
- delete_queues(args[1:])
+ delete_queues(CONF.queues)
if CONF.delete_exchange:
delete_exchange(CONF.control_exchange)
diff --git a/bin/nova-compute b/bin/nova-compute
index f850e1b8c..8826015d4 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -42,18 +42,20 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
if __name__ == '__main__':
config.parse_args(sys.argv)
logging.setup('nova')
utils.monkey_patch()
server = service.Service.create(binary='nova-compute',
- topic=CONF.compute_topic)
+ topic=CONF.compute_topic,
+ db_allowed=False)
service.serve(server)
service.wait()
diff --git a/bin/nova-conductor b/bin/nova-conductor
new file mode 100755
index 000000000..5d9e355dd
--- /dev/null
+++ b/bin/nova-conductor
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Conductor."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova import config
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import service
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
+
+if __name__ == '__main__':
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-conductor',
+ topic=CONF.conductor.topic,
+ manager=CONF.conductor.manager)
+ service.serve(server)
+ service.wait()
diff --git a/bin/nova-console b/bin/nova-console
index c75e088c8..011855a19 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -34,11 +34,12 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('console_topic', 'nova.console.rpcapi')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth
index 654a3f824..9a6016326 100755
--- a/bin/nova-consoleauth
+++ b/bin/nova-consoleauth
@@ -31,13 +31,13 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
-from nova.consoleauth import manager
from nova import config
-from nova import flags
+from nova.consoleauth import manager
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
-CONF = config.CONF
+CONF = cfg.CONF
if __name__ == "__main__":
config.parse_args(sys.argv)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index ed36c47bc..ee7bf2da9 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -35,18 +35,20 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import config
from nova import context
from nova import db
-from nova import config
-from nova import flags
from nova.network import linux_net
from nova.network import rpcapi as network_rpcapi
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('network_manager', 'nova.service')
LOG = logging.getLogger('nova.dhcpbridge')
@@ -91,25 +93,44 @@ def init_leases(network_id):
return network_manager.get_dhcp_leases(ctxt, network_ref)
+def add_action_parsers(subparsers):
+ parser = subparsers.add_parser('init')
+
+ for action in ['add', 'del', 'old']:
+ parser = subparsers.add_parser(action)
+ parser.add_argument('mac')
+ parser.add_argument('ip')
+ parser.set_defaults(func=globals()[action + '_lease'])
+
+
+CONF.register_cli_opt(
+ cfg.SubCommandOpt('action',
+ title='Action options',
+ help='Available dhcpbridge options',
+ handler=add_action_parsers))
+
+
def main():
- """Parse environment and arguments and call the approproate action."""
- flagfile = os.environ.get('FLAGFILE', CONF.dhcpbridge_flagfile)
- argv = config.parse_args(sys.argv, default_config_files=[flagfile])
+ """Parse environment and arguments and call the appropriate action."""
+ try:
+ config_file = os.environ['CONFIG_FILE']
+ except KeyError:
+ config_file = os.environ['FLAGFILE']
+
+ config.parse_args(sys.argv, default_config_files=[config_file])
+
logging.setup("nova")
if int(os.environ.get('TESTING', '0')):
- from nova.tests import fake_flags
+ from nova.tests import conf_fixture
- action = argv[1]
- if action in ['add', 'del', 'old']:
- mac = argv[2]
- ip = argv[3]
+ if CONF.action.name in ['add', 'del', 'old']:
msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") %
- {"action": action,
- "mac": mac,
- "ip": ip})
+ {"action": CONF.action.name,
+ "mac": CONF.action.mac,
+ "ip": CONF.action.ip})
LOG.debug(msg)
- globals()[action + '_lease'](mac, ip)
+ CONF.action.func(CONF.action.mac, CONF.action.ip)
else:
try:
network_id = int(os.environ.get('NETWORK_ID'))
diff --git a/bin/nova-manage b/bin/nova-manage
index 43ff5ebca..4f3d889ea 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -56,7 +56,6 @@
import gettext
import netaddr
-import optparse
import os
import sys
@@ -71,6 +70,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova.api.ec2 import ec2utils
+from nova import availability_zones
from nova.compute import instance_types
from nova.compute import rpcapi as compute_rpcapi
from nova import config
@@ -78,8 +78,8 @@ from nova import context
from nova import db
from nova.db import migration
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import cliutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -89,7 +89,9 @@ from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova import version
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('multi_host', 'nova.network.manager')
@@ -105,7 +107,7 @@ QUOTAS = quota.QUOTAS
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
- func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
+ func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
@@ -203,7 +205,7 @@ class ShellCommands(object):
@args('--path', dest='path', metavar='<path>', help='Script path')
def script(self, path):
- """Runs the script from the specifed path with flags set properly.
+ """Runs the script from the specified path with flags set properly.
arguments: path"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
@@ -219,8 +221,12 @@ def _db_error(caught_exception):
class ProjectCommands(object):
"""Class for managing projects."""
+ @args('--project', dest="project_id", metavar='<Project name>',
+ help='Project name')
+ @args('--key', dest="key", metavar='<key>', help='Key')
+ @args('--value', dest="value", metavar='<value>', help='Value')
def quota(self, project_id, key=None, value=None):
- """Set or display quotas for project"""
+ """Set or display quotas for project."""
ctxt = context.get_admin_context()
project_quota = QUOTAS.get_project_quotas(ctxt, project_id)
if key and key in project_quota:
@@ -244,7 +250,7 @@ class ProjectCommands(object):
@args('--project', dest="project_id", metavar='<Project name>',
help='Project name')
def scrub(self, project_id):
- """Deletes data associated with project"""
+ """Deletes data associated with project."""
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
@@ -262,7 +268,7 @@ class FixedIpCommands(object):
@args('--host', dest="host", metavar='<host>', help='Host')
def list(self, host=None):
- """Lists all fixed ips (optionally by host)"""
+ """Lists all fixed ips (optionally by host)."""
ctxt = context.get_admin_context()
try:
@@ -301,7 +307,6 @@ class FixedIpCommands(object):
for fixed_ip in fixed_ips:
hostname = None
host = None
- mac_address = None
network = all_networks.get(fixed_ip['network_id'])
if network:
has_ip = True
@@ -376,7 +381,7 @@ class FloatingIpCommands(object):
@args('--interface', dest="interface", metavar='<interface>',
help='Optional interface')
def create(self, ip_range, pool=None, interface=None):
- """Creates floating ips for zone by range"""
+ """Creates floating ips for zone by range."""
admin_context = context.get_admin_context()
if not pool:
pool = CONF.default_floating_pool
@@ -396,7 +401,7 @@ class FloatingIpCommands(object):
@args('--ip_range', dest="ip_range", metavar='<range>', help='IP range')
def delete(self, ip_range):
- """Deletes floating ips by range"""
+ """Deletes floating ips by range."""
admin_context = context.get_admin_context()
ips = ({'address': str(address)}
@@ -470,7 +475,7 @@ class NetworkCommands(object):
gateway_v6=None, bridge=None, bridge_interface=None,
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
- """Creates fixed ips for host by range"""
+ """Creates fixed ips for host by range."""
kwargs = dict(((k, v) for k, v in locals().iteritems()
if v and k != "self"))
if multi_host is not None:
@@ -479,7 +484,7 @@ class NetworkCommands(object):
net_manager.create_networks(context.get_admin_context(), **kwargs)
def list(self):
- """List all created networks"""
+ """List all created networks."""
_fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
print _fmt % (_('id'),
_('IPv4'),
@@ -514,18 +519,19 @@ class NetworkCommands(object):
@args('--uuid', dest='uuid', metavar='<uuid>',
help='UUID of network to delete')
def delete(self, fixed_range=None, uuid=None):
- """Deletes a network"""
+ """Deletes a network."""
if fixed_range is None and uuid is None:
- raise Exception("Please specify either fixed_range or uuid")
+ raise Exception(_("Please specify either fixed_range or uuid"))
net_manager = importutils.import_object(CONF.network_manager)
if "QuantumManager" in CONF.network_manager:
if uuid is None:
- raise Exception("UUID is required to delete Quantum Networks")
+ raise Exception(_("UUID is required to delete "
+ "Quantum Networks"))
if fixed_range:
- raise Exception("Deleting by fixed_range is not supported "
- "with the QuantumManager")
+ raise Exception(_("Deleting by fixed_range is not supported "
+ "with the QuantumManager"))
# delete the network
net_manager.delete_network(context.get_admin_context(),
fixed_range, uuid)
@@ -569,7 +575,7 @@ class VmCommands(object):
@args('--host', dest="host", metavar='<host>', help='Host')
def list(self, host=None):
- """Show a list of all instances"""
+ """Show a list of all instances."""
print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
" %-10s %-10s %-10s %-5s" % (_('instance'),
@@ -608,7 +614,7 @@ class VmCommands(object):
class ServiceCommands(object):
- """Enable and disable running services"""
+ """Enable and disable running services."""
@args('--host', dest='host', metavar='<host>', help='Host')
@args('--service', dest='service', metavar='<service>',
@@ -620,6 +626,7 @@ class ServiceCommands(object):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
+ services = availability_zones.set_availability_zones(ctxt, services)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -647,7 +654,7 @@ class ServiceCommands(object):
@args('--service', dest='service', metavar='<service>',
help='Nova service')
def enable(self, host, service):
- """Enable scheduling for a service"""
+ """Enable scheduling for a service."""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
@@ -659,7 +666,7 @@ class ServiceCommands(object):
@args('--service', dest='service', metavar='<service>',
help='Nova service')
def disable(self, host, service):
- """Disable scheduling for a service"""
+ """Disable scheduling for a service."""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
@@ -725,7 +732,7 @@ class ServiceCommands(object):
class HostCommands(object):
- """List hosts"""
+ """List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
@@ -733,8 +740,8 @@ class HostCommands(object):
print "%-25s\t%-15s" % (_('host'),
_('zone'))
ctxt = context.get_admin_context()
- now = timeutils.utcnow()
services = db.service_get_all(ctxt)
+ services = availability_zones.set_availability_zones(ctxt, services)
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
@@ -763,26 +770,10 @@ class DbCommands(object):
print migration.db_version()
-class VersionCommands(object):
- """Class for exposing the codebase version."""
-
- def __init__(self):
- pass
-
- def list(self):
- print (_("%(version)s (%(vcs)s)") %
- {'version': version.version_string(),
- 'vcs': version.version_string_with_vcs()})
-
- def __call__(self):
- self.list()
-
-
class InstanceTypeCommands(object):
"""Class for managing instance types / flavors."""
def _print_instance_types(self, name, val):
- deleted = ('', ', inactive')[val["deleted"] == 1]
is_public = ('private', 'public')[val["is_public"] == 1]
print ("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
"FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
@@ -807,8 +798,8 @@ class InstanceTypeCommands(object):
@args('--is_public', dest="is_public", metavar='<is_public>',
help='Make flavor accessible to the public')
def create(self, name, memory, vcpus, root_gb, ephemeral_gb=0,
- flavorid=None, swap=0, rxtx_factor=1, is_public=True):
- """Creates instance types / flavors"""
+ flavorid=None, swap=0, rxtx_factor=1.0, is_public=True):
+ """Creates instance types / flavors."""
try:
instance_types.create(name, memory, vcpus, root_gb,
ephemeral_gb, flavorid, swap, rxtx_factor,
@@ -834,7 +825,7 @@ class InstanceTypeCommands(object):
@args('--name', dest='name', metavar='<name>',
help='Name of instance type/flavor')
def delete(self, name):
- """Marks instance types / flavors as deleted"""
+ """Marks instance types / flavors as deleted."""
try:
instance_types.destroy(name)
except exception.InstanceTypeNotFound:
@@ -851,7 +842,7 @@ class InstanceTypeCommands(object):
@args('--name', dest='name', metavar='<name>',
help='Name of instance type/flavor')
def list(self, name=None):
- """Lists all active or specific instance types / flavors"""
+ """Lists all active or specific instance types / flavors."""
try:
if name is None:
inst_types = instance_types.get_all_types()
@@ -872,7 +863,7 @@ class InstanceTypeCommands(object):
@args('--value', dest='value', metavar='<value>',
help='The value of the key/value pair')
def set_key(self, name, key, value=None):
- """Add key/value pair to specified instance type's extra_specs"""
+ """Add key/value pair to specified instance type's extra_specs."""
try:
try:
inst_type = instance_types.get_instance_type_by_name(name)
@@ -896,7 +887,7 @@ class InstanceTypeCommands(object):
@args('--key', dest='key', metavar='<key>',
help='The key to be deleted')
def unset_key(self, name, key):
- """Delete the specified extra spec for instance type"""
+ """Delete the specified extra spec for instance type."""
try:
try:
inst_type = instance_types.get_instance_type_by_name(name)
@@ -915,146 +906,6 @@ class InstanceTypeCommands(object):
_db_error(e)
-class StorageManagerCommands(object):
- """Class for mangaging Storage Backends and Flavors"""
-
- def flavor_list(self, flavor=None):
- ctxt = context.get_admin_context()
-
- try:
- if flavor is None:
- flavors = db.sm_flavor_get_all(ctxt)
- else:
- flavors = db.sm_flavor_get(ctxt, flavor)
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- print "%-18s\t%-20s\t%s" % (_('id'),
- _('Label'),
- _('Description'))
-
- for flav in flavors:
- print "%-18s\t%-20s\t%s" % (
- flav['id'],
- flav['label'],
- flav['description'])
-
- def flavor_create(self, label, desc):
- # TODO(renukaapte) flavor name must be unique
- try:
- db.sm_flavor_create(context.get_admin_context(),
- dict(label=label,
- description=desc))
- except exception.DBError, e:
- _db_error(e)
-
- def flavor_delete(self, label):
- try:
- db.sm_flavor_delete(context.get_admin_context(), label)
-
- except exception.DBError, e:
- _db_error(e)
-
- def _splitfun(self, item):
- i = item.split("=")
- return i[0:2]
-
- def backend_list(self, backend_conf_id=None):
- ctxt = context.get_admin_context()
-
- try:
- if backend_conf_id is None:
- backends = db.sm_backend_conf_get_all(ctxt)
- else:
- backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
-
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
- _('Flavor id'),
- _('SR UUID'),
- _('SR Type'),
- _('Config Parameters'),)
-
- for b in backends:
- print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
- b['flavor_id'],
- b['sr_uuid'],
- b['sr_type'],
- b['config_params'],)
-
- def backend_add(self, flavor_label, sr_type, *args):
- # TODO(renukaapte) Add backend_introduce.
- ctxt = context.get_admin_context()
- params = dict(map(self._splitfun, args))
-
- if 'sr_uuid' in params:
- try:
- backend = db.sm_backend_conf_get_by_sr(ctxt,
- params['sr_uuid'])
- except exception.DBError, e:
- _db_error(e)
-
- if backend:
- print _('Backend config found. Would you like to recreate '
- 'this?')
- print _('(WARNING:Recreating will destroy all VDIs on '
- 'backend!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c == 'y' or c == 'Y':
- try:
- db.sm_backend_conf_update(ctxt, backend['id'],
- dict(created=False))
- except exception.DBError, e:
- _db_error(e)
- return
-
- else:
- print _('Backend config not found. Would you like to create '
- 'it?')
- print _('(WARNING: Creating will destroy all data on '
- 'backend!!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c != 'y' and c != 'Y':
- return
-
- print _('(WARNING: Creating will destroy all data on backend!!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c == 'y' or c == 'Y':
- if flavor_label is None:
- print _('error: backend needs to be associated with flavor')
- sys.exit(2)
-
- try:
- flavors = db.sm_flavor_get_by_label(ctxt, flavor_label)
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- config_params = "".join(['%s=%s ' %
- (key, params[key]) for key in params])
-
- try:
- db.sm_backend_conf_create(ctxt,
- dict(flavor_id=flavors['id'],
- sr_uuid=None,
- sr_type=sr_type,
- config_params=config_params))
- except exception.DBError, e:
- _db_error(e)
-
- def backend_remove(self, backend_conf_id):
- try:
- db.sm_backend_conf_delete(context.get_admin_context(),
- backend_conf_id)
-
- except exception.DBError, e:
- _db_error(e)
-
-
class AgentBuildCommands(object):
"""Class for managing agent builds."""
@@ -1116,15 +967,15 @@ class AgentBuildCommands(object):
class GetLogCommands(object):
- """Get logging information"""
+ """Get logging information."""
def errors(self):
- """Get all of the errors from the log files"""
+ """Get all of the errors from the log files."""
error_found = 0
- if CONF.logdir:
- logs = [x for x in os.listdir(CONF.logdir) if x.endswith('.log')]
+ if CONF.log_dir:
+ logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
- log_file = os.path.join(CONF.logdir, file)
+ log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
@@ -1140,7 +991,7 @@ class GetLogCommands(object):
print _('No errors in logfiles!')
def syslog(self, num_entries=10):
- """Get <num_entries> of the nova syslog events"""
+ """Get <num_entries> of the nova syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
@@ -1165,46 +1016,87 @@ class GetLogCommands(object):
print _('No nova entries in syslog!')
-CATEGORIES = [
- ('account', AccountCommands),
- ('agent', AgentBuildCommands),
- ('db', DbCommands),
- ('fixed', FixedIpCommands),
- ('flavor', InstanceTypeCommands),
- ('floating', FloatingIpCommands),
- ('host', HostCommands),
- ('instance_type', InstanceTypeCommands),
- ('logs', GetLogCommands),
- ('network', NetworkCommands),
- ('project', ProjectCommands),
- ('service', ServiceCommands),
- ('shell', ShellCommands),
- ('sm', StorageManagerCommands),
- ('version', VersionCommands),
- ('vm', VmCommands),
- ('vpn', VpnCommands),
-]
-
-
-def lazy_match(name, key_value_tuples):
- """Finds all objects that have a key that case insensitively contains
- [name] key_value_tuples is a list of tuples of the form (key, value)
- returns a list of tuples of the form (key, value)"""
- result = []
- for (k, v) in key_value_tuples:
- if k.lower().find(name.lower()) == 0:
- result.append((k, v))
- if len(result) == 0:
- print _('%s does not match any options:') % name
- for k, _v in key_value_tuples:
- print "\t%s" % k
- sys.exit(2)
- if len(result) > 1:
- print _('%s matched multiple options:') % name
- for k, _v in result:
- print "\t%s" % k
- sys.exit(2)
- return result
+class CellCommands(object):
+ """Commands for managing cells."""
+
+ @args('--name', dest='name', metavar='<name>',
+ help='Name for the new cell')
+ @args('--cell_type', dest='cell_type', metavar='<parent|child>',
+ help='Whether the cell is a parent or child')
+ @args('--username', dest='username', metavar='<username>',
+ help='Username for the message broker in this cell')
+ @args('--password', dest='password', metavar='<password>',
+ help='Password for the message broker in this cell')
+ @args('--hostname', dest='hostname', metavar='<hostname>',
+ help='Address of the message broker in this cell')
+ @args('--port', dest='port', metavar='<number>',
+ help='Port number of the message broker in this cell')
+ @args('--virtual_host', dest='virtual_host', metavar='<virtual_host>',
+ help='The virtual host of the message broker in this cell')
+ @args('--woffset', dest='woffset', metavar='<float>')
+ @args('--wscale', dest='wscale', metavar='<float>')
+ def create(self, name, cell_type='child', username=None, password=None,
+ hostname=None, port=None, virtual_host=None,
+ woffset=None, wscale=None):
+
+ if cell_type not in ['parent', 'child']:
+ print "Error: cell type must be 'parent' or 'child'"
+ sys.exit(2)
+
+ is_parent = cell_type == 'parent'
+ values = {'name': name,
+ 'is_parent': is_parent,
+ 'username': username,
+ 'password': password,
+ 'rpc_host': hostname,
+ 'rpc_port': int(port),
+ 'rpc_virtual_host': virtual_host,
+ 'weight_offset': float(woffset),
+ 'weight_scale': float(wscale)}
+ ctxt = context.get_admin_context()
+ db.cell_create(ctxt, values)
+
+ @args('--cell_name', dest='cell_name', metavar='<cell_name>',
+ help='Name of the cell to delete')
+ def delete(self, cell_name):
+ ctxt = context.get_admin_context()
+ db.cell_delete(ctxt, cell_name)
+
+ def list(self):
+ ctxt = context.get_admin_context()
+ cells = db.cell_get_all(ctxt)
+ fmt = "%3s %-10s %-6s %-10s %-15s %-5s %-10s"
+ print fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
+ 'Port', 'VHost')
+ print fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
+ '-' * 5, '-' * 10)
+ for cell in cells:
+ print fmt % (cell.id, cell.name,
+ 'parent' if cell.is_parent else 'child',
+ cell.username, cell.rpc_host,
+ cell.rpc_port, cell.rpc_virtual_host)
+ print fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
+ '-' * 5, '-' * 10)
+
+
+CATEGORIES = {
+ 'account': AccountCommands,
+ 'agent': AgentBuildCommands,
+ 'cell': CellCommands,
+ 'db': DbCommands,
+ 'fixed': FixedIpCommands,
+ 'flavor': InstanceTypeCommands,
+ 'floating': FloatingIpCommands,
+ 'host': HostCommands,
+ 'instance_type': InstanceTypeCommands,
+ 'logs': GetLogCommands,
+ 'network': NetworkCommands,
+ 'project': ProjectCommands,
+ 'service': ServiceCommands,
+ 'shell': ShellCommands,
+ 'vm': VmCommands,
+ 'vpn': VpnCommands,
+}
def methods_of(obj):
@@ -1217,11 +1109,46 @@ def methods_of(obj):
return result
+def add_command_parsers(subparsers):
+ parser = subparsers.add_parser('version')
+
+ parser = subparsers.add_parser('bash-completion')
+ parser.add_argument('query_category', nargs='?')
+
+ for category in CATEGORIES:
+ command_object = CATEGORIES[category]()
+
+ parser = subparsers.add_parser(category)
+ parser.set_defaults(command_object=command_object)
+
+ category_subparsers = parser.add_subparsers(dest='action')
+
+ for (action, action_fn) in methods_of(command_object):
+ parser = category_subparsers.add_parser(action)
+
+ action_kwargs = []
+ for args, kwargs in getattr(action_fn, 'args', []):
+ action_kwargs.append(kwargs['dest'])
+ kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
+ parser.add_argument(*args, **kwargs)
+
+ parser.set_defaults(action_fn=action_fn)
+ parser.set_defaults(action_kwargs=action_kwargs)
+
+ parser.add_argument('action_args', nargs='*')
+
+
+category_opt = cfg.SubCommandOpt('category',
+ title='Command categories',
+ help='Available categories',
+ handler=add_command_parsers)
+
+
def main():
"""Parse options and call the appropriate class/method."""
-
+ CONF.register_cli_opt(category_opt)
try:
- argv = config.parse_args(sys.argv)
+ config.parse_args(sys.argv)
logging.setup("nova")
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
@@ -1236,79 +1163,47 @@ def main():
print _('Please re-run nova-manage as root.')
sys.exit(2)
- script_name = argv.pop(0)
- if len(argv) < 1:
- print (_("\nOpenStack Nova version: %(version)s (%(vcs)s)\n") %
- {'version': version.version_string(),
- 'vcs': version.version_string_with_vcs()})
- print script_name + " category action [<args>]"
- print _("Available categories:")
- for k, _v in CATEGORIES:
- print "\t%s" % k
- sys.exit(2)
- category = argv.pop(0)
- if category == "bash-completion":
- if len(argv) < 1:
- print " ".join([k for (k, v) in CATEGORIES])
- else:
- query_category = argv.pop(0)
- matches = lazy_match(query_category, CATEGORIES)
- # instantiate the command group object
- category, fn = matches[0]
+ if CONF.category.name == "version":
+ print version.version_string_with_package()
+ sys.exit(0)
+
+ if CONF.category.name == "bash-completion":
+ if not CONF.category.query_category:
+ print " ".join(CATEGORIES.keys())
+ elif CONF.category.query_category in CATEGORIES:
+ fn = CATEGORIES[CONF.category.query_category]
command_object = fn()
actions = methods_of(command_object)
print " ".join([k for (k, v) in actions])
sys.exit(0)
- matches = lazy_match(category, CATEGORIES)
- # instantiate the command group object
- category, fn = matches[0]
- command_object = fn()
- actions = methods_of(command_object)
- if len(argv) < 1:
- if hasattr(command_object, '__call__'):
- action = ''
- fn = command_object.__call__
- else:
- print script_name + " category action [<args>]"
- print _("Available actions for %s category:") % category
- for k, _v in actions:
- print "\t%s" % k
- sys.exit(2)
- else:
- action = argv.pop(0)
- matches = lazy_match(action, actions)
- action, fn = matches[0]
-
- # For not decorated methods
- options = getattr(fn, 'options', [])
-
- usage = "%%prog %s %s <args> [options]" % (category, action)
- parser = optparse.OptionParser(usage=usage)
- for ar, kw in options:
- parser.add_option(*ar, **kw)
- (opts, fn_args) = parser.parse_args(argv)
- fn_kwargs = vars(opts)
- for k, v in fn_kwargs.items():
+ fn = CONF.category.action_fn
+ fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
+ fn_kwargs = {}
+ for k in CONF.category.action_kwargs:
+ v = getattr(CONF.category, 'action_kwarg_' + k)
if v is None:
- del fn_kwargs[k]
- elif isinstance(v, basestring):
- fn_kwargs[k] = v.decode('utf-8')
- else:
- fn_kwargs[k] = v
-
- fn_args = [arg.decode('utf-8') for arg in fn_args]
+ continue
+ if isinstance(v, basestring):
+ v = v.decode('utf-8')
+ fn_kwargs[k] = v
# call the action with the remaining arguments
+ # check arguments
+ try:
+ cliutils.validate_args(fn, *fn_args, **fn_kwargs)
+ except cliutils.MissingArgs as e:
+ # NOTE(mikal): this isn't the most helpful error message ever. It is
+ # long, and tells you a lot of things you probably don't want to know
+ # if you just got a single arg wrong.
+ print fn.__doc__
+ CONF.print_help()
+ print e
+ sys.exit(1)
try:
fn(*fn_args, **fn_kwargs)
rpc.cleanup()
sys.exit(0)
- except TypeError:
- print _("Possible wrong number of arguments supplied")
- print fn.__doc__
- parser.print_help()
- raise
except Exception:
print _("Command failed, please check log for more info")
raise
diff --git a/bin/nova-network b/bin/nova-network
index def7782d7..03472371c 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -35,12 +35,13 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('network_topic', 'nova.network.rpcapi')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 1ba43aa01..beee143f5 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -17,8 +17,8 @@
# under the License.
'''
-Websocket proxy that is compatible with OpenStack Nova.
-Leverages websockify.py by Joel Martin
+Websocket proxy that is compatible with OpenStack Nova
+noVNC consoles. Leverages websockify.py by Joel Martin
'''
import Cookie
@@ -31,7 +31,6 @@ import websockify
from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -68,7 +67,7 @@ opts = [
help='Port on which to listen for incoming requests'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_cli_opts(opts)
LOG = logging.getLogger(__name__)
@@ -92,7 +91,7 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
if not connect_info:
LOG.audit("Invalid Token: %s", token)
- raise Exception("Invalid Token")
+ raise Exception(_("Invalid Token"))
host = connect_info['host']
port = int(connect_info['port'])
@@ -111,7 +110,7 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
if data.find("\r\n\r\n") != -1:
if not data.split("\r\n")[0].find("200"):
LOG.audit("Invalid Connection Info %s", token)
- raise Exception("Invalid Connection Info")
+ raise Exception(_("Invalid Connection Info"))
tsock.recv(len(data))
break
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 9b9e2b7a7..8ec9fbf35 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -35,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.objectstore import s3server
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index a28205a80..c8e880d79 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -33,7 +33,9 @@
"""
import ConfigParser
+import logging
import os
+import pwd
import signal
import subprocess
import sys
@@ -42,6 +44,7 @@ import sys
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
+RC_NOEXECFOUND = 96
def _subprocess_setup():
@@ -50,25 +53,22 @@ def _subprocess_setup():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+def _exit_error(execname, message, errorcode, log=True):
+ print "%s: %s" % (execname, message)
+ if log:
+ logging.error(message)
+ sys.exit(errorcode)
+
+
if __name__ == '__main__':
# Split arguments, require at least a command
execname = sys.argv.pop(0)
if len(sys.argv) < 2:
- print "%s: %s" % (execname, "No command specified")
- sys.exit(RC_NOCOMMAND)
+ _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
configfile = sys.argv.pop(0)
userargs = sys.argv[:]
- # Load configuration
- config = ConfigParser.RawConfigParser()
- config.read(configfile)
- try:
- filters_path = config.get("DEFAULT", "filters_path").split(",")
- except ConfigParser.Error:
- print "%s: Incorrect configuration file: %s" % (execname, configfile)
- sys.exit(RC_BADCONFIG)
-
# Add ../ to sys.path to allow running from branch
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
os.pardir, os.pardir))
@@ -77,18 +77,51 @@ if __name__ == '__main__':
from nova.rootwrap import wrapper
+ # Load configuration
+ try:
+ rawconfig = ConfigParser.RawConfigParser()
+ rawconfig.read(configfile)
+ config = wrapper.RootwrapConfig(rawconfig)
+ except ValueError as exc:
+ msg = "Incorrect value in %s: %s" % (configfile, exc.message)
+ _exit_error(execname, msg, RC_BADCONFIG, log=False)
+ except ConfigParser.Error:
+ _exit_error(execname, "Incorrect configuration file: %s" % configfile,
+ RC_BADCONFIG, log=False)
+
+ if config.use_syslog:
+ wrapper.setup_syslog(execname,
+ config.syslog_log_facility,
+ config.syslog_log_level)
+
# Execute command if it matches any of the loaded filters
- filters = wrapper.load_filters(filters_path)
- filtermatch = wrapper.match_filter(filters, userargs)
- if filtermatch:
- obj = subprocess.Popen(filtermatch.get_command(userargs),
- stdin=sys.stdin,
- stdout=sys.stdout,
- stderr=sys.stderr,
- preexec_fn=_subprocess_setup,
- env=filtermatch.get_environment(userargs))
- obj.wait()
- sys.exit(obj.returncode)
-
- print "Unauthorized command: %s" % ' '.join(userargs)
- sys.exit(RC_UNAUTHORIZED)
+ filters = wrapper.load_filters(config.filters_path)
+ try:
+ filtermatch = wrapper.match_filter(filters, userargs,
+ exec_dirs=config.exec_dirs)
+ if filtermatch:
+ command = filtermatch.get_command(userargs,
+ exec_dirs=config.exec_dirs)
+ if config.use_syslog:
+ logging.info("(%s > %s) Executing %s (filter match = %s)" % (
+ os.getlogin(), pwd.getpwuid(os.getuid())[0],
+ command, filtermatch.name))
+
+ obj = subprocess.Popen(command,
+ stdin=sys.stdin,
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ preexec_fn=_subprocess_setup,
+ env=filtermatch.get_environment(userargs))
+ obj.wait()
+ sys.exit(obj.returncode)
+
+ except wrapper.FilterMatchNotExecutable as exc:
+ msg = ("Executable not found: %s (filter match = %s)"
+ % (exc.match.exec_path, exc.match.name))
+ _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
+
+ except wrapper.NoFilterMatched:
+ msg = ("Unauthorized command: %s (no filter matched)"
+ % ' '.join(userargs))
+ _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
index d6849ce9d..f89b0b3ba 100755
--- a/bin/nova-rpc-zmq-receiver
+++ b/bin/nova-rpc-zmq-receiver
@@ -34,13 +34,13 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import impl_zmq
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 73dfab207..507ff3d5a 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -37,12 +37,13 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('scheduler_topic', 'nova.scheduler.rpcapi')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index b816bf2e9..7882645ad 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -32,7 +32,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import service
diff --git a/contrib/openstack-config b/contrib/openstack-config
deleted file mode 100755
index d7979f7ff..000000000
--- a/contrib/openstack-config
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/sh -e
-### BEGIN INIT INFO
-# Provides: openstack
-# Required-Start: mountkernfs $local_fs
-# Required-Stop: $local_fs
-# X-Start-Before: networking
-# Should-Start:
-# Default-Start: S
-# Default-Stop:
-# Short-Description: Apply configuration from OpenStack Config Drive
-### END INIT INFO
-
-PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin"
-
-. /lib/lsb/init-functions
-
-copy_cloud_config() {
- LABEL="config"
- if [ ! -e /dev/disk/by-label/${LABEL} ]; then
- log_warning_msg "OpenStack Cloud Config drive not found"
- return 1
- fi
-
- MNT=/tmp/config
- mkdir -p ${MNT}
- mount /dev/disk/by-label/${LABEL} ${MNT}
- if [ -e ${MNT}/root/.ssh/authorized_keys ]; then
- mkdir -m 700 -p /root/.ssh/
- cp ${MNT}/root/.ssh/authorized_keys /root/.ssh/
- chmod 600 ${MNT}/root/.ssh/authorized_keys
- fi
- if [ -e ${MNT}/etc/network/interfaces ]; then
- cp ${MNT}/etc/network/interfaces /etc/network/
- chmod 644 /etc/network/interfaces
- fi
- umount ${MNT}
- return 0
-}
-
-case "$1" in
- start|"")
- log_action_begin_msg "Applying OpenStack Cloud Config"
- if copy_cloud_config; then
- log_action_end_msg $?
- else
- log_action_end_msg $?
- fi
- ;;
-
- restart|reload|force-reload|status)
- echo "Error: argument '$1' not supported" >&2
- exit 3
- ;;
-
- stop)
- # No-op
- ;;
-
- *)
- echo "Usage: openstack.sh [start|stop]" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/doc/api_samples/OS-DCF/image-get-resp.json b/doc/api_samples/OS-DCF/image-get-resp.json
new file mode 100644
index 000000000..feae88fb7
--- /dev/null
+++ b/doc/api_samples/OS-DCF/image-get-resp.json
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/image-get-resp.xml b/doc/api_samples/OS-DCF/image-get-resp.xml
new file mode 100644
index 000000000..fc0317742
--- /dev/null
+++ b/doc/api_samples/OS-DCF/image-get-resp.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+</image> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/image-list-resp.json b/doc/api_samples/OS-DCF/image-list-resp.json
new file mode 100644
index 000000000..fe18205f8
--- /dev/null
+++ b/doc/api_samples/OS-DCF/image-list-resp.json
@@ -0,0 +1,214 @@
+{
+ "images": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-DCF:diskConfig": "MANUAL",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/image-list-resp.xml b/doc/api_samples/OS-DCF/image-list-resp.xml
new file mode 100644
index 000000000..f5c463e25
--- /dev/null
+++ b/doc/api_samples/OS-DCF/image-list-resp.xml
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a440c04b-79fa-479c-bed1-0b816eaec379" OS-DCF:diskConfig="MANUAL">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/list-servers-detail-get.json b/doc/api_samples/OS-DCF/list-servers-detail-get.json
new file mode 100644
index 000000000..7aea51e19
--- /dev/null
+++ b/doc/api_samples/OS-DCF/list-servers-detail-get.json
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-02T02:11:55Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "99428f32351a5d89d0f7727c6eec68c1777c545a0972aaac645508dc",
+ "id": "05372e62-05b9-4ee2-9343-9a1fdf2a5fda",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/05372e62-05b9-4ee2-9343-9a1fdf2a5fda",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/05372e62-05b9-4ee2-9343-9a1fdf2a5fda",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-02T02:11:56Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/list-servers-detail-get.xml b/doc/api_samples/OS-DCF/list-servers-detail-get.xml
new file mode 100644
index 000000000..1675b511b
--- /dev/null
+++ b/doc/api_samples/OS-DCF/list-servers-detail-get.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2012-12-02T02:15:38Z" hostId="85973b57730e91f4eea36b3e7a2a7e3fdaf56008af335dd59f897a59" name="new-server-test" created="2012-12-02T02:15:38Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="08266bed-2651-4b6c-9dc8-83f0c3ef9d38" OS-DCF:diskConfig="AUTO">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/08266bed-2651-4b6c-9dc8-83f0c3ef9d38" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/08266bed-2651-4b6c-9dc8-83f0c3ef9d38" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-action-rebuild-req.json b/doc/api_samples/OS-DCF/server-action-rebuild-req.json
new file mode 100644
index 000000000..514607f6d
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-action-rebuild-req.json
@@ -0,0 +1,6 @@
+{
+ "rebuild": {
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-action-rebuild-req.xml b/doc/api_samples/OS-DCF/server-action-rebuild-req.xml
new file mode 100644
index 000000000..1c0fcaa5c
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-action-rebuild-req.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b"
+ OS-DCF:diskConfig="AUTO" /> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-action-rebuild-resp.json b/doc/api_samples/OS-DCF/server-action-rebuild-resp.json
new file mode 100644
index 000000000..24e1e5d04
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-action-rebuild-resp.json
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "NBjMaJoFL4EF",
+ "created": "2012-12-02T02:11:56Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "c076393ad900d62c4805a42df10d9b364f629842681c00cce035487f",
+ "id": "63a8aa13-60fe-41c4-b079-77f6fdf3c841",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/63a8aa13-60fe-41c4-b079-77f6fdf3c841",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/63a8aa13-60fe-41c4-b079-77f6fdf3c841",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-02T02:11:56Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-action-rebuild-resp.xml b/doc/api_samples/OS-DCF/server-action-rebuild-resp.xml
new file mode 100644
index 000000000..1d7b3267c
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-action-rebuild-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-12-02T02:15:39Z" hostId="981de784ae4d8c49ca075024977828a16e7f3c2beeb19115b0366e17" name="new-server-test" created="2012-12-02T02:15:38Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="10791a94-8900-4d0c-b93d-0debb224882e" adminPass="mTxoVD3eALpv" OS-DCF:diskConfig="AUTO">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/10791a94-8900-4d0c-b93d-0debb224882e" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/10791a94-8900-4d0c-b93d-0debb224882e" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-get-resp.json b/doc/api_samples/OS-DCF/server-get-resp.json
new file mode 100644
index 000000000..4ad77036f
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-get-resp.json
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-02T02:11:55Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "c949ab4256cea23b6089b710aa2df48bf6577ed915278b62e33ad8bb",
+ "id": "5046e2f2-3b33-4041-b3cf-e085f73e78e7",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-02T02:11:55Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-get-resp.xml b/doc/api_samples/OS-DCF/server-get-resp.xml
new file mode 100644
index 000000000..3acba49eb
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-12-02T02:15:37Z" hostId="afa0a883de4743c7a0c164327bda5284b875c50e1a9e30de910ac126" name="new-server-test" created="2012-12-02T02:15:37Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="d5d844c0-ecc3-4202-90ed-2e85b7fe513d" OS-DCF:diskConfig="AUTO">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/d5d844c0-ecc3-4202-90ed-2e85b7fe513d" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/d5d844c0-ecc3-4202-90ed-2e85b7fe513d" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-post-req.json b/doc/api_samples/OS-DCF/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-post-req.xml b/doc/api_samples/OS-DCF/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-post-resp.json b/doc/api_samples/OS-DCF/server-post-resp.json
new file mode 100644
index 000000000..f5c1a70f1
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-post-resp.json
@@ -0,0 +1,17 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "CQH9gWzgkVno",
+ "id": "324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-post-resp.xml b/doc/api_samples/OS-DCF/server-post-resp.xml
new file mode 100644
index 000000000..c9439943c
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="900a4ef7-f374-413f-8816-52d3dbfaf498" adminPass="Cj6sZgWq85qm" OS-DCF:diskConfig="AUTO">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/900a4ef7-f374-413f-8816-52d3dbfaf498" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/900a4ef7-f374-413f-8816-52d3dbfaf498" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-resize-post-req.json b/doc/api_samples/OS-DCF/server-resize-post-req.json
new file mode 100644
index 000000000..55e11a31e
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-resize-post-req.json
@@ -0,0 +1,6 @@
+{
+ "resize": {
+ "flavorRef": "3",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-resize-post-req.xml b/doc/api_samples/OS-DCF/server-resize-post-req.xml
new file mode 100644
index 000000000..f0a299bca
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-resize-post-req.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<resize xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO"
+ flavorRef="3" /> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-update-put-req.json b/doc/api_samples/OS-DCF/server-update-put-req.json
new file mode 100644
index 000000000..898ab886a
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-update-put-req.json
@@ -0,0 +1,5 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-update-put-req.xml b/doc/api_samples/OS-DCF/server-update-put-req.xml
new file mode 100644
index 000000000..f48d8e73d
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-update-put-req.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO" /> \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-update-put-resp.json b/doc/api_samples/OS-DCF/server-update-put-resp.json
new file mode 100644
index 000000000..ce6e08455
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-update-put-resp.json
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-02T02:11:57Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07",
+ "id": "324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-02T02:11:58Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-DCF/server-update-put-resp.xml b/doc/api_samples/OS-DCF/server-update-put-resp.xml
new file mode 100644
index 000000000..cd5ff5561
--- /dev/null
+++ b/doc/api_samples/OS-DCF/server-update-put-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-12-02T02:15:40Z" hostId="e987bebf62599c59c4559b249d0f5f300a302d1e9ff22dd1f0c5c4b0" name="new-server-test" created="2012-12-02T02:15:40Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="900a4ef7-f374-413f-8816-52d3dbfaf498" OS-DCF:diskConfig="AUTO">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/900a4ef7-f374-413f-8816-52d3dbfaf498" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/900a4ef7-f374-413f-8816-52d3dbfaf498" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
index 337bc26db..ad3bcab5d 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
@@ -1,7 +1,7 @@
{
"server": {
- "OS-EXT-SRV-ATTR:host": "f852c7db3f344eec955c369b8478fef7",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "1169a68456af48238da47b1d5957a714",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"accessIPv4": "",
"accessIPv6": "",
@@ -13,7 +13,7 @@
}
]
},
- "created": "2012-09-13T17:48:30Z",
+ "created": "2012-11-15T19:27:04Z",
"flavor": {
"id": "1",
"links": [
@@ -23,8 +23,8 @@
}
]
},
- "hostId": "4f846b99d954c7eb75dcbb25e1b92ccc5e77ba74f2bf22c2d8dd24d5",
- "id": "0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "hostId": "2dfce43c41dd288cfac3a5b4251742b3bd2b37c12eb5927e757d9b4c",
+ "id": "1fc2392e-5727-46af-bc21-317a4a3eb04c",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -36,11 +36,11 @@
},
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "href": "http://openstack.example.com/v2/openstack/servers/1fc2392e-5727-46af-bc21-317a4a3eb04c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "href": "http://openstack.example.com/openstack/servers/1fc2392e-5727-46af-bc21-317a4a3eb04c",
"rel": "bookmark"
}
],
@@ -51,7 +51,7 @@
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-13T17:48:30Z",
+ "updated": "2012-11-15T19:27:04Z",
"user_id": "fake"
}
} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
index e8d340a97..4b42c3586 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-09-14T11:34:17Z" hostId="fbaa82dd8c1948d351484640a7165d88a846902e1db2cc24dbaa23da" name="new-server-test" created="2012-09-14T11:34:17Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="ef63354b-dea8-4608-b209-0235ea9d4c47" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="90dd91c6b74e48ab8ee0e1aecefbd6e7" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-11-15T19:27:06Z" hostId="6b8205d183f40afad106dbeac44d3872151ef6f36790077ea2ea85fc" name="new-server-test" created="2012-11-15T19:27:05Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="ece641c1-51f5-4190-9342-d9751f28eead" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="80edfa5af48b4894b20eb1d9d2d4424e" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -14,6 +14,6 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/ef63354b-dea8-4608-b209-0235ea9d4c47" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/ef63354b-dea8-4608-b209-0235ea9d4c47" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/ece641c1-51f5-4190-9342-d9751f28eead" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/ece641c1-51f5-4190-9342-d9751f28eead" rel="bookmark"/>
</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
index f92181a34..db3de77f4 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
@@ -1,8 +1,8 @@
{
"servers": [
{
- "OS-EXT-SRV-ATTR:host": "c90b8445a27f4057ac2457d4f511a617",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "dd99797793774612b081a8be19bf721a",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"accessIPv4": "",
"accessIPv6": "",
@@ -14,7 +14,7 @@
}
]
},
- "created": "2012-09-14T09:30:19Z",
+ "created": "2012-11-15T19:27:05Z",
"flavor": {
"id": "1",
"links": [
@@ -24,8 +24,8 @@
}
]
},
- "hostId": "5cb28923c8cc3f45fca3dd884249a8bf98f8a81900dd4b244d446cfd",
- "id": "f678aaae-1430-409d-8a48-efa08b885b25",
+ "hostId": "146245c049213a54b8c2352751518fcb4c2befd1b942b45a5a705d35",
+ "id": "e0c3563a-84ef-4d0b-bb80-23392cd23882",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -37,11 +37,11 @@
},
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/f678aaae-1430-409d-8a48-efa08b885b25",
+ "href": "http://openstack.example.com/v2/openstack/servers/e0c3563a-84ef-4d0b-bb80-23392cd23882",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/f678aaae-1430-409d-8a48-efa08b885b25",
+ "href": "http://openstack.example.com/openstack/servers/e0c3563a-84ef-4d0b-bb80-23392cd23882",
"rel": "bookmark"
}
],
@@ -52,7 +52,7 @@
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-14T09:30:19Z",
+ "updated": "2012-11-15T19:27:05Z",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
index 4fb7a2f82..8179a3bf9 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2012-09-14T11:34:18Z" hostId="6a0e019807cc227bcd091b89a45cc1c9eed430687ff2313e03ecfc84" name="new-server-test" created="2012-09-14T11:34:17Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="28f2c75f-61f3-44e5-b8c3-1725be74a831" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="d630e6a6e18b493bbe95c37df4144d58" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+ <server status="ACTIVE" updated="2012-11-15T19:27:06Z" hostId="b348a7376e2e61781829c9b45e63675aa0207632c25ce36c55a4fb2a" name="new-server-test" created="2012-11-15T19:27:06Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="2c4d049170fe409abc14942757d63a4e" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -15,7 +15,7 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/28f2c75f-61f3-44e5-b8c3-1725be74a831" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/28f2c75f-61f3-44e5-b8c3-1725be74a831" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" rel="bookmark"/>
</server>
</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/server-post-req.json b/doc/api_samples/OS-EXT-STS/server-post-req.json
new file mode 100644
index 000000000..09366b4c9
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/OS-EXT-STS/server-post-req.xml b/doc/api_samples/OS-EXT-STS/server-post-req.xml
new file mode 100644
index 000000000..077dd7618
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/doc/api_samples/OS-EXT-STS/server-post-resp.json b/doc/api_samples/OS-EXT-STS/server-post-resp.json
new file mode 100644
index 000000000..db9ed3d6d
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "MVk5HPrazHcG",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/OS-EXT-STS/server-post-resp.xml b/doc/api_samples/OS-EXT-STS/server-post-resp.xml
new file mode 100644
index 000000000..68f0933c7
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" adminPass="MVk5HPrazHcG">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="bookmark"/>
+</server>
diff --git a/doc/api_samples/OS-EXT-STS/servers-detail-resp.json b/doc/api_samples/OS-EXT-STS/servers-detail-resp.json
new file mode 100644
index 000000000..6764bd883
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/servers-detail-resp.json
@@ -0,0 +1,59 @@
+{
+ "servers": [
+ {
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-05T07:34:10Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "585aa01f94eca692eff9f77ffe3eab866d8a819e97397e28c5c7df12",
+ "id": "030758aa-5c41-41c6-8fb4-29d44eb96a85",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/030758aa-5c41-41c6-8fb4-29d44eb96a85",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/030758aa-5c41-41c6-8fb4-29d44eb96a85",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-05T07:34:10Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-detail-resp.xml b/doc/api_samples/OS-EXT-STS/servers-detail-resp.xml
new file mode 100644
index 000000000..e4dafd70b
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/servers-detail-resp.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2012-12-05T07:35:57Z" hostId="20171312b8f2c42b69b09360e08d7fe257b2e021107be687d0302a96" name="new-server-test" created="2012-12-05T07:35:56Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="085c76aa-a58f-45b8-ba78-4d1e541d5f89" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/085c76aa-a58f-45b8-ba78-4d1e541d5f89" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/085c76aa-a58f-45b8-ba78-4d1e541d5f89" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.json b/doc/api_samples/OS-EXT-STS/servers-list-resp.json
new file mode 100644
index 000000000..d0309cc1f
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/servers-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.xml b/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
new file mode 100644
index 000000000..f1c3cabf4
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="7128d3b9-1993-402c-91ca-ed59a0193ffa">
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json b/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json
new file mode 100644
index 000000000..42e0e21ce
--- /dev/null
+++ b/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "vcpus": 8
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml b/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml
new file mode 100644
index 000000000..92b51a866
--- /dev/null
+++ b/doc/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors> \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json b/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json
new file mode 100644
index 000000000..3fd5fa1d7
--- /dev/null
+++ b/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml b/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml
new file mode 100644
index 000000000..6abd22641
--- /dev/null
+++ b/doc/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json
index b0e481a62..787f5f3b7 100644
--- a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json
+++ b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json
@@ -1,12 +1,12 @@
{
"flavor": {
- "OS-FLV-EXT-DATA:ephemeral": 30,
- "disk": 10,
- "id": "666",
"name": "flavortest",
"ram": 1024,
- "rxtx_factor": 2,
- "swap": 5,
- "vcpus": 2
+ "vcpus": 2,
+ "disk": 10,
+ "id": "666",
+ "rxtx_factor": 2.0,
+ "OS-FLV-EXT-DATA:ephemeral": 30,
+ "swap": 5
}
} \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml
index ec1ec2e2b..99aec19ed 100644
--- a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml
+++ b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml
@@ -1,11 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
- name="testflavor1"
+ name="flavortest"
ram="1024"
vcpus="2"
disk="10"
id="666"
swap="5"
- rxtx_factor="2"
+ rxtx_factor="2.0"
OS-FLV-EXT-DATA:ephemeral="30" /> \ No newline at end of file
diff --git a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml
index c877553a1..7da3e2379 100644
--- a/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml
+++ b/doc/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="testflavor1" id="666" OS-FLV-EXT-DATA:ephemeral="30">
+<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="666" OS-FLV-EXT-DATA:ephemeral="30">
<atom:link href="http://openstack.example.com/v2/openstack/flavors/666" rel="self"/>
<atom:link href="http://openstack.example.com/openstack/flavors/666" rel="bookmark"/>
</flavor> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index f4dfa0683..bd002c080 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -2,7 +2,7 @@
"extensions": [
{
"alias": "NMN",
- "description": "Multiple network support",
+ "description": "Multiple network support.",
"links": [],
"name": "Multinic",
"namespace": "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
@@ -10,7 +10,7 @@
},
{
"alias": "OS-DCF",
- "description": "Disk Management Extension",
+ "description": "Disk Management Extension.",
"links": [],
"name": "DiskConfig",
"namespace": "http://docs.openstack.org/compute/ext/disk_config/api/v1.1",
@@ -26,7 +26,7 @@
},
{
"alias": "OS-EXT-STS",
- "description": "Extended Status support",
+ "description": "Extended Status support.",
"links": [],
"name": "ExtendedStatus",
"namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1",
@@ -34,7 +34,7 @@
},
{
"alias": "OS-FLV-DISABLED",
- "description": "Support to show the disabled status of a flavor",
+ "description": "Support to show the disabled status of a flavor.",
"links": [],
"name": "FlavorDisabled",
"namespace": "http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1",
@@ -42,7 +42,7 @@
},
{
"alias": "OS-FLV-EXT-DATA",
- "description": "Provide additional data for flavors",
+ "description": "Provide additional data for flavors.",
"links": [],
"name": "FlavorExtraData",
"namespace": "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1",
@@ -50,7 +50,7 @@
},
{
"alias": "OS-SCH-HNT",
- "description": "Pass arbitrary key/value pairs to the scheduler",
+ "description": "Pass arbitrary key/value pairs to the scheduler.",
"links": [],
"name": "SchedulerHints",
"namespace": "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
@@ -65,8 +65,16 @@
"updated": "2011-09-20T00:00:00+00:00"
},
{
+ "alias": "os-agents",
+ "description": "Agents support.",
+ "links": [],
+ "name": "Agents",
+ "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2",
+ "updated": "2012-10-28T00:00:00-00:00"
+ },
+ {
"alias": "os-aggregates",
- "description": "Admin-only aggregate administration",
+ "description": "Admin-only aggregate administration.",
"links": [],
"name": "Aggregates",
"namespace": "http://docs.openstack.org/compute/ext/aggregates/api/v1.1",
@@ -74,15 +82,23 @@
},
{
"alias": "os-availability-zone",
- "description": "Add availability_zone to the Create Server v1.1 API",
+ "description": "Add availability_zone to the Create Server v1.1 API.",
"links": [],
"name": "AvailabilityZone",
"namespace": "http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1",
"updated": "2012-08-09T00:00:00+00:00"
},
{
+ "alias": "os-cells",
+ "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ",
+ "links": [],
+ "name": "Cells",
+ "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
+ "updated": "2011-09-21T00:00:00+00:00"
+ },
+ {
"alias": "os-certificates",
- "description": "Certificates support",
+ "description": "Certificates support.",
"links": [],
"name": "Certificates",
"namespace": "http://docs.openstack.org/compute/ext/certificates/api/v1.1",
@@ -97,8 +113,16 @@
"updated": "2011-12-16T00:00:00+00:00"
},
{
+ "alias": "os-cloudpipe-update",
+ "description": "Adds the ability to set the vpn ip/port for cloudpipe instances.",
+ "links": [],
+ "name": "CloudpipeUpdate",
+ "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2",
+ "updated": "2012-11-14T00:00:00+00:00"
+ },
+ {
"alias": "os-config-drive",
- "description": "Config Drive Extension",
+ "description": "Config Drive Extension.",
"links": [],
"name": "ConfigDrive",
"namespace": "http://docs.openstack.org/compute/ext/config_drive/api/v1.1",
@@ -121,8 +145,16 @@
"updated": "2011-12-23T00:00:00+00:00"
},
{
+ "alias": "os-coverage",
+ "description": "Enable Nova Coverage.",
+ "links": [],
+ "name": "Coverage",
+ "namespace": "http://docs.openstack.org/compute/ext/coverage/api/v2",
+ "updated": "2012-10-15T00:00:00+00:00"
+ },
+ {
"alias": "os-create-server-ext",
- "description": "Extended support to the Create Server v1.1 API",
+ "description": "Extended support to the Create Server v1.1 API.",
"links": [],
"name": "Createserverext",
"namespace": "http://docs.openstack.org/compute/ext/createserverext/api/v1.1",
@@ -130,15 +162,23 @@
},
{
"alias": "os-deferred-delete",
- "description": "Instance deferred delete",
+ "description": "Instance deferred delete.",
"links": [],
"name": "DeferredDelete",
"namespace": "http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1",
"updated": "2011-09-01T00:00:00+00:00"
},
{
+ "alias": "os-fixed-ips",
+ "description": "Fixed IPs support.",
+ "links": [],
+ "name": "FixedIPs",
+ "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2",
+ "updated": "2012-10-18T13:25:27-06:00"
+ },
+ {
"alias": "os-flavor-access",
- "description": "Flavor access supprt",
+        "description": "Flavor access support.",
"links": [],
"name": "FlavorAccess",
"namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2",
@@ -146,7 +186,7 @@
},
{
"alias": "os-flavor-extra-specs",
- "description": "Instance type (flavor) extra specs",
+ "description": "Instance type (flavor) extra specs.",
"links": [],
"name": "FlavorExtraSpecs",
"namespace": "http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1",
@@ -162,7 +202,7 @@
},
{
"alias": "os-flavor-rxtx",
- "description": "Support to show the rxtx status of a flavor",
+ "description": "Support to show the rxtx status of a flavor.",
"links": [],
"name": "FlavorRxtx",
"namespace": "http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1",
@@ -170,7 +210,7 @@
},
{
"alias": "os-flavor-swap",
- "description": "Support to show the swap status of a flavor",
+ "description": "Support to show the swap status of a flavor.",
"links": [],
"name": "FlavorSwap",
"namespace": "http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1",
@@ -178,7 +218,7 @@
},
{
"alias": "os-floating-ip-dns",
- "description": "Floating IP DNS support",
+ "description": "Floating IP DNS support.",
"links": [],
"name": "FloatingIpDns",
"namespace": "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1",
@@ -186,7 +226,7 @@
},
{
"alias": "os-floating-ip-pools",
- "description": "Floating IPs support",
+ "description": "Floating IPs support.",
"links": [],
"name": "FloatingIpPools",
"namespace": "http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1",
@@ -194,15 +234,39 @@
},
{
"alias": "os-floating-ips",
- "description": "Floating IPs support",
+ "description": "Floating IPs support.",
"links": [],
"name": "FloatingIps",
"namespace": "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1",
"updated": "2011-06-16T00:00:00+00:00"
},
{
+ "alias": "os-floating-ips-bulk",
+ "description": "Bulk handling of Floating IPs.",
+ "links": [],
+ "name": "FloatingIpsBulk",
+ "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2",
+ "updated": "2012-10-29T13:25:27-06:00"
+ },
+ {
+ "alias": "os-fping",
+ "description": "Fping Management Extension.",
+ "links": [],
+ "name": "Fping",
+ "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1",
+ "updated": "2012-07-06T00:00:00+00:00"
+ },
+ {
+ "alias": "os-hide-server-addresses",
+ "description": "Support hiding server addresses in certain states.",
+ "links": [],
+ "name": "HideServerAddresses",
+ "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1",
+ "updated": "2012-12-11T00:00:00+00:00"
+ },
+ {
"alias": "os-hosts",
- "description": "Admin-only host administration",
+ "description": "Admin-only host administration.",
"links": [],
"name": "Hosts",
"namespace": "http://docs.openstack.org/compute/ext/hosts/api/v1.1",
@@ -210,7 +274,7 @@
},
{
"alias": "os-hypervisors",
- "description": "Admin-only hypervisor administration",
+ "description": "Admin-only hypervisor administration.",
"links": [],
"name": "Hypervisors",
"namespace": "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1",
@@ -218,7 +282,7 @@
},
{
"alias": "os-instance_usage_audit_log",
- "description": "Admin-only Task Log Monitoring",
+ "description": "Admin-only Task Log Monitoring.",
"links": [],
"name": "OSInstanceUsageAuditLog",
"namespace": "http://docs.openstack.org/ext/services/api/v1.1",
@@ -226,7 +290,7 @@
},
{
"alias": "os-keypairs",
- "description": "Keypair Support",
+ "description": "Keypair Support.",
"links": [],
"name": "Keypairs",
"namespace": "http://docs.openstack.org/compute/ext/keypairs/api/v1.1",
@@ -234,7 +298,7 @@
},
{
"alias": "os-multiple-create",
- "description": "Allow multiple create in the Create Server v1.1 API",
+ "description": "Allow multiple create in the Create Server v1.1 API.",
"links": [],
"name": "MultipleCreate",
"namespace": "http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1",
@@ -242,15 +306,31 @@
},
{
"alias": "os-networks",
- "description": "Admin-only Network Management Extension",
+ "description": "Admin-only Network Management Extension.",
"links": [],
"name": "Networks",
- "namespace": "http://docs.openstack.org/compute/ext/networks/api/v1.1",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "updated": "2011-12-23T00:00:00+00:00"
+ },
+ {
+ "alias": "os-tenant-networks",
+ "description": "Tenant-based Network Management Extension.",
+ "links": [],
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "2011-12-23T00:00:00+00:00"
},
{
+ "alias": "os-networks-associate",
+ "description": "Network association support.",
+ "links": [],
+ "name": "NetworkAssociationSupport",
+ "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
+ "updated": "2012-11-19T00:00:00+00:00"
+ },
+ {
"alias": "os-quota-class-sets",
- "description": "Quota classes management support",
+ "description": "Quota classes management support.",
"links": [],
"name": "QuotaClasses",
"namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1",
@@ -258,7 +338,7 @@
},
{
"alias": "os-quota-sets",
- "description": "Quotas management support",
+ "description": "Quotas management support.",
"links": [],
"name": "Quotas",
"namespace": "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1",
@@ -266,7 +346,7 @@
},
{
"alias": "os-rescue",
- "description": "Instance rescue mode",
+ "description": "Instance rescue mode.",
"links": [],
"name": "Rescue",
"namespace": "http://docs.openstack.org/compute/ext/rescue/api/v1.1",
@@ -274,7 +354,7 @@
},
{
"alias": "os-security-groups",
- "description": "Security group support",
+ "description": "Security group support.",
"links": [],
"name": "SecurityGroups",
"namespace": "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1",
@@ -282,23 +362,39 @@
},
{
"alias": "os-server-diagnostics",
- "description": "Allow Admins to view server diagnostics through server action",
+ "description": "Allow Admins to view server diagnostics through server action.",
"links": [],
"name": "ServerDiagnostics",
"namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1",
"updated": "2011-12-21T00:00:00+00:00"
},
{
+ "alias": "os-server-password",
+ "description": "Server password support",
+ "links": [],
+ "name": "ServerPassword",
+ "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
+ "updated": "2012-11-29T00:00:00+00:00"
+ },
+ {
"alias": "os-server-start-stop",
- "description": "Start/Stop instance compute API support",
+ "description": "Start/Stop instance compute API support.",
"links": [],
"name": "ServerStartStop",
"namespace": "http://docs.openstack.org/compute/ext/servers/api/v1.1",
"updated": "2012-01-23T00:00:00+00:00"
},
{
+ "alias": "os-services",
+ "description": "Services support.",
+ "links": [],
+ "name": "Services",
+ "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
+ "updated": "2012-10-28T00:00:00-00:00"
+ },
+ {
"alias": "os-simple-tenant-usage",
- "description": "Simple tenant usage extension",
+ "description": "Simple tenant usage extension.",
"links": [],
"name": "SimpleTenantUsage",
"namespace": "http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1",
@@ -314,7 +410,7 @@
},
{
"alias": "os-user-data",
- "description": "Add user_data to the Create Server v1.1 API",
+ "description": "Add user_data to the Create Server v1.1 API.",
"links": [],
"name": "UserData",
"namespace": "http://docs.openstack.org/compute/ext/userdata/api/v1.1",
@@ -322,7 +418,7 @@
},
{
"alias": "os-virtual-interfaces",
- "description": "Virtual interface support",
+ "description": "Virtual interface support.",
"links": [],
"name": "VirtualInterfaces",
"namespace": "http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1",
@@ -330,7 +426,7 @@
},
{
"alias": "os-volumes",
- "description": "Volumes support",
+ "description": "Volumes support.",
"links": [],
"name": "Volumes",
"namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1",
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index de18e5a2b..ebb1c4302 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -1,25 +1,25 @@
<?xml version='1.0' encoding='UTF-8'?>
<extensions xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
<extension alias="NMN" updated="2011-06-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multinic/api/v1.1" name="Multinic">
- <description>Multiple network support</description>
+ <description>Multiple network support.</description>
</extension>
<extension alias="OS-DCF" updated="2011-09-27T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" name="DiskConfig">
- <description>Disk Management Extension</description>
+ <description>Disk Management Extension.</description>
</extension>
<extension alias="OS-EXT-SRV-ATTR" updated="2011-11-03T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>Extended Server Attributes support.</description>
</extension>
<extension alias="OS-EXT-STS" updated="2011-11-03T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedStatus">
- <description>Extended Status support</description>
+ <description>Extended Status support.</description>
</extension>
<extension alias="OS-FLV-DISABLED" updated="2012-08-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" name="FlavorDisabled">
- <description>Support to show the disabled status of a flavor</description>
+ <description>Support to show the disabled status of a flavor.</description>
</extension>
<extension alias="OS-FLV-EXT-DATA" updated="2011-09-14T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" name="FlavorExtraData">
- <description>Provide additional data for flavors</description>
+ <description>Provide additional data for flavors.</description>
</extension>
<extension alias="OS-SCH-HNT" updated="2011-07-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2" name="SchedulerHints">
- <description>Pass arbitrary key/value pairs to the scheduler</description>
+ <description>Pass arbitrary key/value pairs to the scheduler.</description>
</extension>
<extension alias="os-admin-actions" updated="2011-09-20T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/admin-actions/api/v1.1" name="AdminActions">
<description>Enable admin-only server actions
@@ -28,14 +28,23 @@
resetNetwork, injectNetworkInfo, lock, unlock, createBackup
</description>
</extension>
+ <extension alias="os-agents" updated="2012-10-28T00:00:00-00:00" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" name="Agents">
+ <description>Agents support.</description>
+ </extension>
<extension alias="os-aggregates" updated="2012-01-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/aggregates/api/v1.1" name="Aggregates">
- <description>Admin-only aggregate administration</description>
+ <description>Admin-only aggregate administration.</description>
</extension>
<extension alias="os-availability-zone" updated="2012-08-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
- <description>Add availability_zone to the Create Server v1.1 API</description>
+ <description>Add availability_zone to the Create Server v1.1 API.</description>
+ </extension>
+ <extension alias="os-cells" updated="2011-09-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
+ <description>Enables cells-related functionality such as adding child cells,
+ listing child cells, getting the capabilities of the local cell,
+ and returning build plans to parent cells' schedulers
+ </description>
</extension>
<extension alias="os-certificates" updated="2012-01-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
- <description>Certificates support</description>
+ <description>Certificates support.</description>
</extension>
<extension alias="os-cloudpipe" updated="2011-12-16T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1" name="Cloudpipe">
<description>Adds actions to create cloudpipe instances.
@@ -48,8 +57,11 @@
a SSH Bastion host is forthcoming.
</description>
</extension>
+ <extension alias="os-cloudpipe-update" updated="2012-11-14T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2" name="CloudpipeUpdate">
+ <description>Adds the ability to set the vpn ip/port for cloudpipe instances.</description>
+ </extension>
<extension alias="os-config-drive" updated="2012-07-16T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/config_drive/api/v1.1" name="ConfigDrive">
- <description>Config Drive Extension</description>
+ <description>Config Drive Extension.</description>
</extension>
<extension alias="os-console-output" updated="2011-12-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-console-output/api/v2" name="ConsoleOutput">
<description>Console log output support, with tailing ability.</description>
@@ -57,17 +69,23 @@
<extension alias="os-consoles" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-consoles/api/v2" name="Consoles">
<description>Interactive Console support.</description>
</extension>
+ <extension alias="os-coverage" updated="2012-10-15T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/coverage/api/v2" name="Coverage">
+ <description>Enable Nova Coverage.</description>
+ </extension>
<extension alias="os-create-server-ext" updated="2011-07-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/createserverext/api/v1.1" name="Createserverext">
- <description>Extended support to the Create Server v1.1 API</description>
+ <description>Extended support to the Create Server v1.1 API.</description>
</extension>
<extension alias="os-deferred-delete" updated="2011-09-01T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
- <description>Instance deferred delete</description>
+ <description>Instance deferred delete.</description>
+ </extension>
+ <extension alias="os-fixed-ips" updated="2012-10-18T13:25:27-06:00" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" name="FixedIPs">
+ <description>Fixed IPs support.</description>
</extension>
<extension alias="os-flavor-access" updated="2012-08-01T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_access/api/v2" name="FlavorAccess">
- <description>Flavor access supprt</description>
+ <description>Flavor access supprt.</description>
</extension>
<extension alias="os-flavor-extra-specs" updated="2011-06-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1" name="FlavorExtraSpecs">
- <description>Instance type (flavor) extra specs</description>
+ <description>Instance type (flavor) extra specs.</description>
</extension>
<extension alias="os-flavor-manage" updated="2012-01-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1" name="FlavorManage">
<description>
@@ -75,69 +93,90 @@
</description>
</extension>
<extension alias="os-flavor-rxtx" updated="2012-08-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1" name="FlavorRxtx">
- <description>Support to show the rxtx status of a flavor</description>
+ <description>Support to show the rxtx status of a flavor.</description>
</extension>
<extension alias="os-flavor-swap" updated="2012-08-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1" name="FlavorSwap">
- <description>Support to show the swap status of a flavor</description>
+ <description>Support to show the swap status of a flavor.</description>
</extension>
<extension alias="os-floating-ip-dns" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/ext/floating_ip_dns/api/v1.1" name="FloatingIpDns">
- <description>Floating IP DNS support</description>
+ <description>Floating IP DNS support.</description>
</extension>
<extension alias="os-floating-ip-pools" updated="2012-01-04T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1" name="FloatingIpPools">
- <description>Floating IPs support</description>
+ <description>Floating IPs support.</description>
</extension>
<extension alias="os-floating-ips" updated="2011-06-16T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/floating_ips/api/v1.1" name="FloatingIps">
- <description>Floating IPs support</description>
+ <description>Floating IPs support.</description>
+ </extension>
+ <extension alias="os-floating-ips-bulk" updated="2012-10-29T13:25:27-06:00" namespace="http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2" name="FloatingIpsBulk">
+ <description>Bulk handling of Floating IPs.</description>
+ </extension>
+ <extension alias="os-fping" updated="2012-07-06T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/fping/api/v1.1" name="Fping">
+ <description>Fping Management Extension.</description>
+ </extension>
+ <extension alias="os-hide-server-addresses" updated="2012-12-11T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1" name="HideServerAddresses">
+ <description>Support hiding server addresses in certain states.</description>
</extension>
<extension alias="os-hosts" updated="2011-06-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/hosts/api/v1.1" name="Hosts">
- <description>Admin-only host administration</description>
+ <description>Admin-only host administration.</description>
</extension>
<extension alias="os-hypervisors" updated="2012-06-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/hypervisors/api/v1.1" name="Hypervisors">
- <description>Admin-only hypervisor administration</description>
+ <description>Admin-only hypervisor administration.</description>
</extension>
<extension alias="os-instance_usage_audit_log" updated="2012-07-06T01:00:00+00:00" namespace="http://docs.openstack.org/ext/services/api/v1.1" name="OSInstanceUsageAuditLog">
- <description>Admin-only Task Log Monitoring</description>
+ <description>Admin-only Task Log Monitoring.</description>
</extension>
<extension alias="os-keypairs" updated="2011-08-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/keypairs/api/v1.1" name="Keypairs">
- <description>Keypair Support</description>
+ <description>Keypair Support.</description>
</extension>
<extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
- <description>Allow multiple create in the Create Server v1.1 API</description>
+ <description>Allow multiple create in the Create Server v1.1 API.</description>
+ </extension>
+ <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
+ <description>Admin-only Network Management Extension.</description>
</extension>
- <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
- <description>Admin-only Network Management Extension</description>
+ <extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
+ <description>Network association support.</description>
+ </extension>
+ <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
+ <description>Tenant-based Network Management Extension.</description>
</extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
- <description>Quota classes management support</description>
+ <description>Quota classes management support.</description>
</extension>
<extension alias="os-quota-sets" updated="2011-08-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" name="Quotas">
- <description>Quotas management support</description>
+ <description>Quotas management support.</description>
</extension>
<extension alias="os-rescue" updated="2011-08-18T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/rescue/api/v1.1" name="Rescue">
- <description>Instance rescue mode</description>
+ <description>Instance rescue mode.</description>
</extension>
<extension alias="os-security-groups" updated="2011-07-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" name="SecurityGroups">
- <description>Security group support</description>
+ <description>Security group support.</description>
</extension>
<extension alias="os-server-diagnostics" updated="2011-12-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1" name="ServerDiagnostics">
- <description>Allow Admins to view server diagnostics through server action</description>
+ <description>Allow Admins to view server diagnostics through server action.</description>
+ </extension>
+ <extension alias="os-server-password" updated="2012-11-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
+ <description>Server password support</description>
</extension>
<extension alias="os-server-start-stop" updated="2012-01-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
- <description>Start/Stop instance compute API support</description>
+ <description>Start/Stop instance compute API support.</description>
+ </extension>
+ <extension alias="os-services" updated="2012-10-28T00:00:00-00:00" namespace="http://docs.openstack.org/compute/ext/services/api/v2" name="Services">
+ <description>Services support.</description>
</extension>
<extension alias="os-simple-tenant-usage" updated="2011-08-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1" name="SimpleTenantUsage">
- <description>Simple tenant usage extension</description>
+ <description>Simple tenant usage extension.</description>
</extension>
<extension alias="os-used-limits" updated="2012-07-13T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" name="UsedLimits">
<description>Provide data on limited resources that are being used.</description>
</extension>
<extension alias="os-user-data" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/userdata/api/v1.1" name="UserData">
- <description>Add user_data to the Create Server v1.1 API</description>
+ <description>Add user_data to the Create Server v1.1 API.</description>
</extension>
<extension alias="os-virtual-interfaces" updated="2011-08-17T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1" name="VirtualInterfaces">
- <description>Virtual interface support</description>
+ <description>Virtual interface support.</description>
</extension>
<extension alias="os-volumes" updated="2011-03-25T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
- <description>Volumes support</description>
+ <description>Volumes support.</description>
</extension>
</extensions>
diff --git a/doc/api_samples/all_extensions/flavor-get-resp.json b/doc/api_samples/all_extensions/flavor-get-resp.json
index bbd681e66..d37117d84 100644
--- a/doc/api_samples/all_extensions/flavor-get-resp.json
+++ b/doc/api_samples/all_extensions/flavor-get-resp.json
@@ -21,4 +21,4 @@
"swap": "",
"vcpus": 1
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/all_extensions/server-get-resp.json b/doc/api_samples/all_extensions/server-get-resp.json
index 3eb1870f9..a174bd779 100644
--- a/doc/api_samples/all_extensions/server-get-resp.json
+++ b/doc/api_samples/all_extensions/server-get-resp.json
@@ -1,8 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "3972bc79fa2a4754b7559153a5a48422",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "543330fc7412414094e79c867798cefd",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
@@ -18,7 +18,7 @@
]
},
"config_drive": "",
- "created": "2012-08-20T21:11:06Z",
+ "created": "2012-11-15T19:28:30Z",
"flavor": {
"id": "1",
"links": [
@@ -28,8 +28,8 @@
}
]
},
- "hostId": "dc23873c80c22f14705d190b645b59398cbc8ed3cdf6145468051c0d",
- "id": "7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "hostId": "edc4f072b6ca46a2d95c717401aa9835a204d3e4e6b148a7faba9ab0",
+ "id": "05c070bf-1c34-4d99-901c-0f97a7239b86",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -42,11 +42,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "href": "http://openstack.example.com/v2/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "href": "http://openstack.example.com/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
"rel": "bookmark"
}
],
@@ -62,7 +62,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-08-20T21:11:06Z",
+ "updated": "2012-11-15T19:28:31Z",
"user_id": "fake"
}
} \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/server-get-resp.xml b/doc/api_samples/all_extensions/server-get-resp.xml
index 6f2d8587f..8751a79ca 100644
--- a/doc/api_samples/all_extensions/server-get-resp.xml
+++ b/doc/api_samples/all_extensions/server-get-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-08-29T18:31:11Z" hostId="28cb24b3eafec0079eaca92bb439843ccdbe0cc2597b3ad9956f2113" name="new-server-test" created="2012-08-29T18:31:11Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="f3053932-a09d-446d-ba6e-4303b6725317" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="6f18ef4ea265447d8fe1b957b1e23ab4" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-11-15T19:28:35Z" hostId="71b7ec711488460249e7b30d505a59e474454e58d379dbddb3655fa3" name="new-server-test" created="2012-11-15T19:28:35Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="72ecf76b-789e-4bc9-9807-e8bb4afe4f74" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="748c8668e4324a82949d0e7f7e3003e2" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -14,8 +14,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/f3053932-a09d-446d-ba6e-4303b6725317" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/f3053932-a09d-446d-ba6e-4303b6725317" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/all_extensions/servers-details-resp.json b/doc/api_samples/all_extensions/servers-details-resp.json
index dc2698fec..dd0b8ab05 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.json
+++ b/doc/api_samples/all_extensions/servers-details-resp.json
@@ -2,8 +2,8 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "1ed961df12f5434a8e54e0efd1049fa5",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "f2df66e47d1f427cbd106cf9058360cc",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
@@ -19,7 +19,7 @@
]
},
"config_drive": "",
- "created": "2012-09-12T17:16:15Z",
+ "created": "2012-11-15T19:28:29Z",
"flavor": {
"id": "1",
"links": [
@@ -29,8 +29,8 @@
}
]
},
- "hostId": "c161a6e3edcd047317a6cfbe599d7711850fd871210b62fdd2f6479e",
- "id": "1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "hostId": "7eedbc35a14388e24ec12917b1eb0bd5dc969619a0e367591d55d9ef",
+ "id": "7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -40,13 +40,14 @@
}
]
},
+ "key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "href": "http://openstack.example.com/v2/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "href": "http://openstack.example.com/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"rel": "bookmark"
}
],
@@ -62,7 +63,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-12T17:16:15Z",
+ "updated": "2012-11-15T19:28:30Z",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/all_extensions/servers-details-resp.xml b/doc/api_samples/all_extensions/servers-details-resp.xml
index c769c8ec1..bbef848d9 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.xml
+++ b/doc/api_samples/all_extensions/servers-details-resp.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2012-09-12T17:16:23Z" hostId="95bc84e46706592751395e8651c7d28cb0c86722ae372fe97fb27f35" name="new-server-test" created="2012-09-12T17:16:23Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="2e6a40cf-4abb-44ba-8a1c-fdb666110b42" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="530cfd748f4b4a24b3dc7015d5a0a02e" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+ <server status="ACTIVE" updated="2012-11-15T19:28:34Z" hostId="aa999c3533a60f2569f2fb142139cdc522b43f9d4153d80bc29bc6ad" name="new-server-test" created="2012-11-15T19:28:34Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="b82eb08d8d4042a99cdd2bfcc749e057" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -15,8 +15,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/2e6a40cf-4abb-44ba-8a1c-fdb666110b42" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/2e6a40cf-4abb-44ba-8a1c-fdb666110b42" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/limit-get-resp.json b/doc/api_samples/limit-get-resp.json
index 29b7b80b6..e11c3ee8e 100644
--- a/doc/api_samples/limit-get-resp.json
+++ b/doc/api_samples/limit-get-resp.json
@@ -4,36 +4,34 @@
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
+ "maxSecurityGroupRules": 20,
+ "maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
- "maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
- "maxSecurityGroups": 10,
- "maxSecurityGroupRules": 20
+ "maxTotalRAMSize": 51200
},
"rate": [
{
"limit": [
{
- "next-available": "2012-08-31T22:36:27Z",
+ "next-available": "2012-11-27T17:22:18Z",
"remaining": 10,
"unit": "MINUTE",
"value": 10,
"verb": "POST"
},
{
- "next-available": "2012-08-31T22:36:27Z",
+ "next-available": "2012-11-27T17:22:18Z",
"remaining": 10,
"unit": "MINUTE",
"value": 10,
"verb": "PUT"
},
{
- "next-available": "2012-08-31T22:36:27Z",
+ "next-available": "2012-11-27T17:22:18Z",
"remaining": 100,
"unit": "MINUTE",
"value": 100,
@@ -46,7 +44,7 @@
{
"limit": [
{
- "next-available": "2012-08-31T22:36:27Z",
+ "next-available": "2012-11-27T17:22:18Z",
"remaining": 50,
"unit": "DAY",
"value": 50,
@@ -59,7 +57,7 @@
{
"limit": [
{
- "next-available": "2012-08-31T22:36:27Z",
+ "next-available": "2012-11-27T17:22:18Z",
"remaining": 3,
"unit": "MINUTE",
"value": 3,
@@ -68,7 +66,20 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "2012-11-27T17:22:18Z",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/limit-get-resp.xml b/doc/api_samples/limit-get-resp.xml
index b3811a528..d8c197091 100644
--- a/doc/api_samples/limit-get-resp.xml
+++ b/doc/api_samples/limit-get-resp.xml
@@ -2,30 +2,31 @@
<limits xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
<rates>
<rate regex=".*" uri="*">
- <limit next-available="2012-08-31T22:36:23Z" unit="MINUTE" verb="POST" remaining="10" value="10"/>
- <limit next-available="2012-08-31T22:36:23Z" unit="MINUTE" verb="PUT" remaining="10" value="10"/>
- <limit next-available="2012-08-31T22:36:23Z" unit="MINUTE" verb="DELETE" remaining="100" value="100"/>
+ <limit next-available="2012-11-27T17:22:18Z" unit="MINUTE" verb="POST" remaining="10" value="10"/>
+ <limit next-available="2012-11-27T17:22:18Z" unit="MINUTE" verb="PUT" remaining="10" value="10"/>
+ <limit next-available="2012-11-27T17:22:18Z" unit="MINUTE" verb="DELETE" remaining="100" value="100"/>
</rate>
<rate regex="^/servers" uri="*/servers">
- <limit next-available="2012-08-31T22:36:23Z" unit="DAY" verb="POST" remaining="50" value="50"/>
+ <limit next-available="2012-11-27T17:22:18Z" unit="DAY" verb="POST" remaining="50" value="50"/>
</rate>
<rate regex=".*changes-since.*" uri="*changes-since*">
- <limit next-available="2012-08-31T22:36:23Z" unit="MINUTE" verb="GET" remaining="3" value="3"/>
+ <limit next-available="2012-11-27T17:22:18Z" unit="MINUTE" verb="GET" remaining="3" value="3"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="2012-11-27T17:22:18Z" unit="HOUR" verb="GET" remaining="12" value="12"/>
</rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
- <limit name="maxTotalInstances" value="10"/>
<limit name="maxPersonality" value="5"/>
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
- <limit name="maxTotalVolumes" value="10"/>
+ <limit name="maxSecurityGroups" value="10"/>
<limit name="maxTotalCores" value="20"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
+ <limit name="maxTotalInstances" value="10"/>
<limit name="maxTotalRAMSize" value="51200"/>
- <limit name="maxSecurityGroups" value="10"/>
- <limit name="maxSecurityGroupRules" value="20"/>
</absolute>
-</limits>
+</limits> \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-post-req.json b/doc/api_samples/os-agents/agent-post-req.json
new file mode 100644
index 000000000..217993b17
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-post-req.json
@@ -0,0 +1,10 @@
+{
+ "agent": {
+ "hypervisor": "hypervisor",
+ "os": "os",
+ "architecture": "x86",
+ "version": "8.0",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "url": "xxxxxxxxxxxx"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-post-req.xml b/doc/api_samples/os-agents/agent-post-req.xml
new file mode 100644
index 000000000..be93e97ce
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <hypervisor>hypervisor</hypervisor>
+ <os>os</os>
+ <architecture>x86</architecture>
+ <version>8.0</version>
+ <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
+ <url>xxxxxxxxxxxx</url>
+</agent> \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-post-resp.json b/doc/api_samples/os-agents/agent-post-resp.json
new file mode 100644
index 000000000..418d11f50
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-post-resp.json
@@ -0,0 +1,11 @@
+{
+ "agent": {
+ "agent_id": "1",
+ "architecture": "x86",
+ "hypervisor": "hypervisor",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "os": "os",
+ "url": "xxxxxxxxxxxx",
+ "version": "8.0"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-post-resp.xml b/doc/api_samples/os-agents/agent-post-resp.xml
new file mode 100644
index 000000000..79f62b7fb
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-post-resp.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<agent>
+ <url>xxxxxxxxxxxx</url>
+ <hypervisor>hypervisor</hypervisor>
+ <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
+ <version>8.0</version>
+ <architecture>x86</architecture>
+ <os>os</os>
+ <agent_id>1</agent_id>
+</agent> \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-update-put-req.json b/doc/api_samples/os-agents/agent-update-put-req.json
new file mode 100644
index 000000000..e4eaf5352
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-update-put-req.json
@@ -0,0 +1,7 @@
+{
+ "para": {
+ "url": "xxx://xxxx/xxx/xxx",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "version": "7.0"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-update-put-req.xml b/doc/api_samples/os-agents/agent-update-put-req.xml
new file mode 100644
index 000000000..f759880c1
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-update-put-req.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<para>
+ <version>7.0</version>
+ <url>xxx://xxxx/xxx/xxx</url>
+ <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
+</para> \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-update-put-resp.json b/doc/api_samples/os-agents/agent-update-put-resp.json
new file mode 100644
index 000000000..6b67222c8
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-update-put-resp.json
@@ -0,0 +1,8 @@
+{
+ "agent": {
+ "agent_id": "1",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "url": "xxx://xxxx/xxx/xxx",
+ "version": "7.0"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agent-update-put-resp.xml b/doc/api_samples/os-agents/agent-update-put-resp.xml
new file mode 100644
index 000000000..badf2750e
--- /dev/null
+++ b/doc/api_samples/os-agents/agent-update-put-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<agent>
+ <url>xxx://xxxx/xxx/xxx</url>
+ <version>7.0</version>
+ <agent_id>1</agent_id>
+ <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
+</agent> \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agents-get-resp.json b/doc/api_samples/os-agents/agents-get-resp.json
new file mode 100644
index 000000000..36eac4ced
--- /dev/null
+++ b/doc/api_samples/os-agents/agents-get-resp.json
@@ -0,0 +1,13 @@
+{
+ "agents": [
+ {
+ "agent_id": "1",
+ "architecture": "x86",
+ "hypervisor": "hypervisor",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "os": "os",
+ "url": "xxxxxxxxxxxx",
+ "version": "8.0"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-agents/agents-get-resp.xml b/doc/api_samples/os-agents/agents-get-resp.xml
new file mode 100644
index 000000000..4194f62c9
--- /dev/null
+++ b/doc/api_samples/os-agents/agents-get-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<agents>
+ <agent url="xxxxxxxxxxxx" hypervisor="hypervisor" md5hash="add6bb58e139be103324d04d82d8f545" version="8.0" architecture="x86" os="os" agent_id="1"/>
+</agents> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregate-update-post-resp.json b/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
index 81869e730..6636f0a17 100644
--- a/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregate-update-post-resp.json
@@ -1,13 +1,15 @@
{
"aggregate": {
"availability_zone": "nova2",
- "created_at": "2012-10-01T18:50:27.781065",
+ "created_at": "2012-12-04T12:04:27.075065",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova2"
+ },
"name": "newname",
- "updated_at": "2012-10-01T18:50:27.791392"
+ "updated_at": "2012-12-04T12:04:27.242597"
}
} \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml b/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
index ad9498aa0..25227669b 100644
--- a/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregate-update-post-resp.xml
@@ -3,10 +3,12 @@
<name>newname</name>
<availability_zone>nova2</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.506667</created_at>
- <updated_at>2012-10-01 18:50:35.517397</updated_at>
+ <created_at>2012-12-04 12:04:30.245284</created_at>
+ <updated_at>2012-12-04 12:04:30.357795</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova2</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
index 518f4176a..1f7918ba8 100644
--- a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json
@@ -1,14 +1,16 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.511586",
+ "created_at": "2012-12-04T12:04:24.399784",
"deleted": false,
"deleted_at": null,
"hosts": [
- "581d29b9e3504d8a895caddb13839b15"
+ "0438c6a4e8d841ad823b801d681f4680"
],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
index a4c9de5fd..ad11f3859 100644
--- a/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.xml
@@ -3,12 +3,14 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.236556</created_at>
+ <created_at>2012-12-04 12:04:27.574038</created_at>
<updated_at>None</updated_at>
<hosts>
- <host>7c9e00dbca5e4fb88538b021c0f933a5</host>
+ <host>392adba19dd449179804eaff16ff4a97</host>
</hosts>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-get-resp.json b/doc/api_samples/os-aggregates/aggregates-get-resp.json
index cde446e51..101a6584d 100644
--- a/doc/api_samples/os-aggregates/aggregates-get-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-get-resp.json
@@ -1,13 +1,15 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.048605",
+ "created_at": "2012-11-16T06:22:23.032493",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/os-aggregates/aggregates-get-resp.xml b/doc/api_samples/os-aggregates/aggregates-get-resp.xml
index be1349bd2..431e59cf4 100644
--- a/doc/api_samples/os-aggregates/aggregates-get-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-get-resp.xml
@@ -3,10 +3,12 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.764838</created_at>
+ <created_at>2012-11-16 06:22:25.587739</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
-</aggregate> \ No newline at end of file
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/doc/api_samples/os-aggregates/aggregates-list-get-resp.json b/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
index 75b412b53..53d278c63 100644
--- a/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-list-get-resp.json
@@ -2,12 +2,14 @@
"aggregates": [
{
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.252869",
+ "created_at": "2012-11-16T06:22:23.361359",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml b/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
index c5590855b..8d92e1466 100644
--- a/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-list-get-resp.xml
@@ -4,11 +4,13 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.970677</created_at>
+ <created_at>2012-11-16 06:22:25.935099</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
-</aggregates> \ No newline at end of file
+</aggregates>
diff --git a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
index dc4806a4f..33b4702ef 100644
--- a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json
@@ -1,12 +1,13 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:26.604176",
+ "created_at": "2012-11-16T06:22:22.342791",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
"metadata": {
+ "availability_zone": "nova",
"key": "value"
},
"name": "name",
diff --git a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
index 7eeefb8b7..5e2193d06 100644
--- a/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.xml
@@ -3,12 +3,13 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:34.313003</created_at>
+ <created_at>2012-11-16 06:22:24.864471</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
<metadata>
<key>value</key>
+ <availability_zone>nova</availability_zone>
</metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
index 497fcb7fb..ba9d4e00a 100644
--- a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
+++ b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json
@@ -1,12 +1,14 @@
{
"aggregate": {
"availability_zone": "nova",
- "created_at": "2012-10-01T18:50:27.511586",
+ "created_at": "2012-12-04T12:04:26.557909",
"deleted": false,
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
index dc8a55330..33dce2838 100644
--- a/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
+++ b/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml
@@ -3,10 +3,12 @@
<name>name</name>
<availability_zone>nova</availability_zone>
<deleted>False</deleted>
- <created_at>2012-10-01 18:50:35.236556</created_at>
+ <created_at>2012-12-04 12:04:29.722109</created_at>
<updated_at>None</updated_at>
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate> \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-get-resp.json b/doc/api_samples/os-cells/cells-get-resp.json
new file mode 100644
index 000000000..62eb8ec31
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-get-resp.xml b/doc/api_samples/os-cells/cells-get-resp.xml
new file mode 100644
index 000000000..12256a5bd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-get-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/> \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.json b/doc/api_samples/os-cells/cells-list-empty-resp.json
new file mode 100644
index 000000000..5325a4e85
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-empty-resp.json
@@ -0,0 +1,3 @@
+{
+ "cells": []
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.xml b/doc/api_samples/os-cells/cells-list-empty-resp.xml
new file mode 100644
index 000000000..6ac77b4bd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-empty-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/> \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-resp.json b/doc/api_samples/os-cells/cells-list-resp.json
new file mode 100644
index 000000000..97ea4c6dd
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-resp.json
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username1"
+ },
+ {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ },
+ {
+ "name": "cell5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username5"
+ },
+ {
+ "name": "cell2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username2"
+ },
+ {
+ "name": "cell4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username4"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-resp.xml b/doc/api_samples/os-cells/cells-list-resp.xml
new file mode 100644
index 000000000..7d697bb91
--- /dev/null
+++ b/doc/api_samples/os-cells/cells-list-resp.xml
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <cell username="username1" rpc_host="None" type="child" name="cell1" rpc_port="None"/>
+ <cell username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/>
+ <cell username="username5" rpc_host="None" type="child" name="cell5" rpc_port="None"/>
+ <cell username="username2" rpc_host="None" type="parent" name="cell2" rpc_port="None"/>
+ <cell username="username4" rpc_host="None" type="parent" name="cell4" rpc_port="None"/>
+</cells> \ No newline at end of file
diff --git a/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json b/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json
new file mode 100644
index 000000000..7882765b4
--- /dev/null
+++ b/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "192.168.1.1",
+ "vpn_port": "2000"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml b/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml
new file mode 100644
index 000000000..253b0426a
--- /dev/null
+++ b/doc/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<configure_project>
+ <vpn_ip>192.168.1.1</vpn_ip>
+ <vpn_port>2000</vpn_port>
+</configure_project> \ No newline at end of file
diff --git a/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json b/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json
index 8c555bf99..a66b456c1 100644
--- a/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json
+++ b/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json
@@ -1,5 +1,3 @@
{
- "cloudpipe": {
- "instance_id": "72afecab-24b0-437e-b1d9-88a83be701b3"
- }
+ "instance_id": "1e9b8425-34af-488e-b969-4d46f4a6382e"
} \ No newline at end of file
diff --git a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
index 9efac48bb..d6773dfa5 100644
--- a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
+++ b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
@@ -1,15 +1,13 @@
{
"cloudpipes": [
{
- "cloudpipe": {
- "created_at": "2012-09-25T18:18:55Z",
- "instance_id": "72afecab-24b0-437e-b1d9-88a83be701b3",
- "internal_ip": "192.168.0.3",
- "project_id": "cloudpipe-059f21e3-c20e-4efc-9e7a-eba2ab3c6f9a",
- "public_ip": "127.0.0.1",
- "public_port": 22,
- "state": "down"
- }
+ "created_at": "2012-11-27T17:18:01Z",
+ "instance_id": "27deecdb-baa3-4a26-9c82-32994b815b01",
+ "internal_ip": "192.168.0.3",
+ "project_id": "cloudpipe-fa1765bd-a352-49c7-a6b7-8ee108a3cb0c",
+ "public_ip": "127.0.0.1",
+ "public_port": 22,
+ "state": "down"
}
]
} \ No newline at end of file
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-req.json b/doc/api_samples/os-consoles/get-vnc-console-post-req.json
new file mode 100644
index 000000000..1926119ce
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-req.json
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-req.xml b/doc/api_samples/os-consoles/get-vnc-console-post-req.xml
new file mode 100644
index 000000000..00f32c6b9
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getVNCConsole type="novnc" />
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-resp.json b/doc/api_samples/os-consoles/get-vnc-console-post-resp.json
new file mode 100644
index 000000000..4bcaf4686
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-resp.json
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url": "http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3&title=dafa(75ecef58-3b8e-4659-ab3b-5501454188e9)"
+ }
+}
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
new file mode 100644
index 000000000..3c7e0c4a3
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>novnc</type>
+ <url>http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3</url>
+</console>
diff --git a/doc/api_samples/os-consoles/server-post-req.json b/doc/api_samples/os-consoles/server-post-req.json
new file mode 100644
index 000000000..09366b4c9
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/os-consoles/server-post-req.xml b/doc/api_samples/os-consoles/server-post-req.xml
new file mode 100644
index 000000000..077dd7618
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/doc/api_samples/os-consoles/server-post-resp.json b/doc/api_samples/os-consoles/server-post-resp.json
new file mode 100644
index 000000000..db9ed3d6d
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "MVk5HPrazHcG",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/os-consoles/server-post-resp.xml b/doc/api_samples/os-consoles/server-post-resp.xml
new file mode 100644
index 000000000..68f0933c7
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" adminPass="MVk5HPrazHcG">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="bookmark"/>
+</server>
diff --git a/doc/api_samples/os-coverage/coverage-report-post-req.json b/doc/api_samples/os-coverage/coverage-report-post-req.json
new file mode 100644
index 000000000..cb16cfc85
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-report-post-req.json
@@ -0,0 +1,5 @@
+{
+ "report" : {
+ "file" : "report"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-report-post-req.xml b/doc/api_samples/os-coverage/coverage-report-post-req.xml
new file mode 100644
index 000000000..1cc9b7433
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-report-post-req.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<report>
+ <file>report</file>
+</report> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-report-post-resp.json b/doc/api_samples/os-coverage/coverage-report-post-resp.json
new file mode 100644
index 000000000..e7db3076a
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-report-post-resp.json
@@ -0,0 +1,3 @@
+{
+ "path": "/tmp/tmpV0Pno7/nova-coverage_D8L8SB/report"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-report-post-resp.xml b/doc/api_samples/os-coverage/coverage-report-post-resp.xml
new file mode 100644
index 000000000..8b7188ad0
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-report-post-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<path>/tmp/tmpAqRtz5/nova-coverage_Iqja9E/report</path> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-start-combine-post-req.json b/doc/api_samples/os-coverage/coverage-start-combine-post-req.json
new file mode 100644
index 000000000..6d41ba598
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-start-combine-post-req.json
@@ -0,0 +1,5 @@
+{
+ "start" : {
+ "combine": true
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-start-combine-post-req.xml b/doc/api_samples/os-coverage/coverage-start-combine-post-req.xml
new file mode 100644
index 000000000..10489b3be
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-start-combine-post-req.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<start>
+ <combine>True</combine>
+</start> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-start-post-req.json b/doc/api_samples/os-coverage/coverage-start-post-req.json
new file mode 100644
index 000000000..8e5ef033c
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-start-post-req.json
@@ -0,0 +1,4 @@
+{
+ "start" : {
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-start-post-req.xml b/doc/api_samples/os-coverage/coverage-start-post-req.xml
new file mode 100644
index 000000000..398971d54
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-start-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<start></start> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-stop-post-req.json b/doc/api_samples/os-coverage/coverage-stop-post-req.json
new file mode 100644
index 000000000..57de88191
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-stop-post-req.json
@@ -0,0 +1,4 @@
+{
+ "stop" : {
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-stop-post-req.xml b/doc/api_samples/os-coverage/coverage-stop-post-req.xml
new file mode 100644
index 000000000..7092c81e2
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-stop-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<stop></stop> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-stop-post-resp.json b/doc/api_samples/os-coverage/coverage-stop-post-resp.json
new file mode 100644
index 000000000..d3caf3a5a
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-stop-post-resp.json
@@ -0,0 +1,3 @@
+{
+ "path": "/tmp/tmpua9HvB/nova-coverage_rs2CaS"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-stop-post-resp.xml b/doc/api_samples/os-coverage/coverage-stop-post-resp.xml
new file mode 100644
index 000000000..f0c921847
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-stop-post-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<path>/tmp/tmpCLve38/nova-coverage_GJ4BZ_</path> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-xml-report-post-req.json b/doc/api_samples/os-coverage/coverage-xml-report-post-req.json
new file mode 100644
index 000000000..bcbe0c4a0
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-xml-report-post-req.json
@@ -0,0 +1,6 @@
+{
+ "report": {
+ "xml": true,
+ "file": "report"
+ }
+}
diff --git a/doc/api_samples/os-coverage/coverage-xml-report-post-req.xml b/doc/api_samples/os-coverage/coverage-xml-report-post-req.xml
new file mode 100644
index 000000000..62e5bc6f4
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-xml-report-post-req.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<report>
+ <file>report</file>
+ <xml>True</xml>
+</report> \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-xml-report-post-resp.json b/doc/api_samples/os-coverage/coverage-xml-report-post-resp.json
new file mode 100644
index 000000000..0648b2ebc
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-xml-report-post-resp.json
@@ -0,0 +1,3 @@
+{
+ "path": "/tmp/tmp6kdYaa/nova-coverage_TOTUbz/report"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-coverage/coverage-xml-report-post-resp.xml b/doc/api_samples/os-coverage/coverage-xml-report-post-resp.xml
new file mode 100644
index 000000000..72f584003
--- /dev/null
+++ b/doc/api_samples/os-coverage/coverage-xml-report-post-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<path>/tmp/tmp4j87bp/nova-coverage_7ViTA7/report</path> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/force-delete-post-req.json b/doc/api_samples/os-deferred-delete/force-delete-post-req.json
new file mode 100644
index 000000000..3f1abb676
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/force-delete-post-req.json
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/force-delete-post-req.xml b/doc/api_samples/os-deferred-delete/force-delete-post-req.xml
new file mode 100644
index 000000000..ab3477c9c
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/force-delete-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<forceDelete /> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/restore-post-req.json b/doc/api_samples/os-deferred-delete/restore-post-req.json
new file mode 100644
index 000000000..0e526ff64
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/restore-post-req.json
@@ -0,0 +1,3 @@
+{
+ "restore": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/restore-post-req.xml b/doc/api_samples/os-deferred-delete/restore-post-req.xml
new file mode 100644
index 000000000..a43bef2f5
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/restore-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<restore /> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-req.json b/doc/api_samples/os-deferred-delete/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-req.xml b/doc/api_samples/os-deferred-delete/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-resp.json b/doc/api_samples/os-deferred-delete/server-post-resp.json
new file mode 100644
index 000000000..a81b795e1
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "jDje6SdBHGfQ",
+ "id": "e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-deferred-delete/server-post-resp.xml b/doc/api_samples/os-deferred-delete/server-post-resp.xml
new file mode 100644
index 000000000..1562cf716
--- /dev/null
+++ b/doc/api_samples/os-deferred-delete/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="43008037-cd16-436e-948d-e084d17c37eb" adminPass="eDu5JojvmLQC">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/43008037-cd16-436e-948d-e084d17c37eb" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/43008037-cd16-436e-948d-e084d17c37eb" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedip-post-req.json b/doc/api_samples/os-fixed-ips/fixedip-post-req.json
new file mode 100644
index 000000000..cf8ba0e0b
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedip-post-req.json
@@ -0,0 +1,3 @@
+{
+ "reserve": "None"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedip-post-req.xml b/doc/api_samples/os-fixed-ips/fixedip-post-req.xml
new file mode 100644
index 000000000..e29b685be
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedip-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reserve>None</reserve> \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedips-get-resp.json b/doc/api_samples/os-fixed-ips/fixedips-get-resp.json
new file mode 100644
index 000000000..d63c91559
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedips-get-resp.json
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "address": "192.168.1.1",
+ "cidr": "192.168.1.0/24",
+ "host": "host",
+ "hostname": "openstack"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml b/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml
new file mode 100644
index 000000000..a9676721f
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<fixed_ip>
+ <cidr>192.168.1.0/24</cidr>
+ <hostname>openstack</hostname>
+ <host>host</host>
+ <address>192.168.1.1</address>
+</fixed_ip> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-req.json b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json
new file mode 100644
index 000000000..8a3830f09
--- /dev/null
+++ b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json
@@ -0,0 +1,9 @@
+{
+ "flavor": {
+ "name": "test_flavor",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+        "id": "10"
+ }
+}
diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-req.xml b/doc/api_samples/os-flavor-manage/flavor-create-post-req.xml
new file mode 100644
index 000000000..b90c108d0
--- /dev/null
+++ b/doc/api_samples/os-flavor-manage/flavor-create-post-req.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor>
+ <name>test_flavor</name>
+ <ram>1024</ram>
+ <vcpus>2</vcpus>
+ <disk>10</disk>
+ <id>10</id>
+</flavor>
diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-resp.json b/doc/api_samples/os-flavor-manage/flavor-create-post-resp.json
new file mode 100644
index 000000000..9280b047d
--- /dev/null
+++ b/doc/api_samples/os-flavor-manage/flavor-create-post-resp.json
@@ -0,0 +1,19 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "10",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/10",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/10",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "test_flavor",
+ "ram": 1024,
+ "vcpus": 2
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-resp.xml b/doc/api_samples/os-flavor-manage/flavor-create-post-resp.xml
new file mode 100644
index 000000000..e213ddb31
--- /dev/null
+++ b/doc/api_samples/os-flavor-manage/flavor-create-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="test_flavor" id="10">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/10" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/10" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json
new file mode 100644
index 000000000..63eaddeb6
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml
new file mode 100644
index 000000000..78b430eca
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json
new file mode 100644
index 000000000..81e9c993c
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "vcpus": 8
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml
new file mode 100644
index 000000000..05192e3f9
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" rxtx_factor="1.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json
new file mode 100644
index 000000000..b86a63df6
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "flavortest",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "100",
+ "rxtx_factor": 2.0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml
new file mode 100644
index 000000000..7038e1b88
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="flavortest"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="100"
+ rxtx_factor="2.0" /> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json
new file mode 100644
index 000000000..5129dbeb8
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "100",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/100",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/100",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "flavortest",
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml
new file mode 100644
index 000000000..8fb4f46be
--- /dev/null
+++ b/doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="100" rxtx_factor="2.0">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/100" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/100" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
new file mode 100644
index 000000000..15604fe2b
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
new file mode 100644
index 000000000..5357967f3
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
new file mode 100644
index 000000000..5bb94f348
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.json
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
new file mode 100644
index 000000000..55b54f700
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-list-resp.xml
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
new file mode 100644
index 000000000..83b94cea0
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.json
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "flavortest",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "100",
+ "swap": 5
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
new file mode 100644
index 000000000..b604f9bdf
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-req.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="flavortest"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="100"
+ swap="5" /> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
new file mode 100644
index 000000000..d8e75d381
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.json
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "100",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/100",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/100",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "flavortest",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
new file mode 100644
index 000000000..7b779cf3f
--- /dev/null
+++ b/doc/api_samples/os-flavor-swap/flavor-swap-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="flavortest" id="100" swap="5">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/100" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/100" rel="bookmark"/>
+</flavor> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json
new file mode 100644
index 000000000..d174bad5a
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json
@@ -0,0 +1,8 @@
+{
+ "floating_ips_bulk_create" :
+ {
+ "ip_range": "192.168.1.0/24",
+ "pool": "nova",
+ "interface": "eth0"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml
new file mode 100644
index 000000000..1fc730509
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+<ip_range>192.168.1.0/24</ip_range>
+<pool>nova</pool>
+<interface>eth0</interface>
+</floating_ips_bulk_create> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json
new file mode 100644
index 000000000..ef1cbfb17
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json
@@ -0,0 +1,7 @@
+{
+ "floating_ips_bulk_create": {
+ "interface": "eth0",
+ "ip_range": "192.168.1.0/24",
+ "pool": "nova"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml
new file mode 100644
index 000000000..db80bbfc1
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+ <interface>eth0</interface>
+ <ip_range>192.168.1.0/24</ip_range>
+ <pool>nova</pool>
+</floating_ips_bulk_create> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json
new file mode 100644
index 000000000..df59c1a73
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json
@@ -0,0 +1,3 @@
+{
+ "ip_range": "192.168.1.0/24"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml
new file mode 100644
index 000000000..c40f28dc3
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<ip_range>192.168.1.0/24</ip_range> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json
new file mode 100644
index 000000000..166984b24
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json
@@ -0,0 +1,3 @@
+{
+ "floating_ips_bulk_delete": "192.168.1.0/24"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml
new file mode 100644
index 000000000..3d77af334
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_delete>192.168.1.0/24</floating_ips_bulk_delete> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json
new file mode 100644
index 000000000..0eaaf75ae
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json
@@ -0,0 +1,11 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml
new file mode 100644
index 000000000..4c3c8cd9c
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json
new file mode 100644
index 000000000..de1e622bb
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json
@@ -0,0 +1,25 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.1",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.2",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml
new file mode 100644
index 000000000..6ef85bd87
--- /dev/null
+++ b/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.1</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.2</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/server-get-resp.json b/doc/api_samples/os-hide-server-addresses/server-get-resp.json
new file mode 100644
index 000000000..b4209f461
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-get-resp.json
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-19T20:32:40Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "d2697421d9869915ee376575508fdba76432f9b3ef451e7a1f86c62e",
+ "id": "2e9157dc-deea-4ee7-820a-640ecba32b5a",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/2e9157dc-deea-4ee7-820a-640ecba32b5a",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/2e9157dc-deea-4ee7-820a-640ecba32b5a",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-19T20:32:42Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/server-get-resp.xml b/doc/api_samples/os-hide-server-addresses/server-get-resp.xml
new file mode 100644
index 000000000..caa4a2fa5
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-12-19T20:32:39Z" hostId="4ae230cb1f554a4a4e02b03bc79c2da5354c3051a70cef6298ace888" name="new-server-test" created="2012-12-19T20:32:37Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="b8b02dd4-b2bb-441d-b52a-ee5c89c832d2">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/b8b02dd4-b2bb-441d-b52a-ee5c89c832d2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/b8b02dd4-b2bb-441d-b52a-ee5c89c832d2" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/server-post-req.json b/doc/api_samples/os-hide-server-addresses/server-post-req.json
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-post-req.json
diff --git a/doc/api_samples/os-hide-server-addresses/server-post-req.xml b/doc/api_samples/os-hide-server-addresses/server-post-req.xml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-post-req.xml
diff --git a/doc/api_samples/os-hide-server-addresses/server-post-resp.json b/doc/api_samples/os-hide-server-addresses/server-post-resp.json
new file mode 100644
index 000000000..15282fde4
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "c47PrLGGmARP",
+ "id": "1130fd6e-4d52-4e2a-848f-89b28fa03a7a",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/1130fd6e-4d52-4e2a-848f-89b28fa03a7a",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/1130fd6e-4d52-4e2a-848f-89b28fa03a7a",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/server-post-resp.xml b/doc/api_samples/os-hide-server-addresses/server-post-resp.xml
new file mode 100644
index 000000000..446fe5a9c
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="cc81aaca-5c3c-4ac6-bffe-79eb7a12def2" adminPass="4Z9kWS2YcY9f">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/cc81aaca-5c3c-4ac6-bffe-79eb7a12def2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/cc81aaca-5c3c-4ac6-bffe-79eb7a12def2" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/servers-details-resp.json b/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
new file mode 100644
index 000000000..3787cd707
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2012-12-19T20:32:37Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "11c1ae0bb109cd0467c62e96917cfa2956a9f3c1dce9a6565d80f5eb",
+ "id": "625a1d37-3515-49ad-91e2-21075a1b2ccf",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/625a1d37-3515-49ad-91e2-21075a1b2ccf",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/625a1d37-3515-49ad-91e2-21075a1b2ccf",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2012-12-19T20:32:39Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/servers-details-resp.xml b/doc/api_samples/os-hide-server-addresses/servers-details-resp.xml
new file mode 100644
index 000000000..0296cc167
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/servers-details-resp.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2012-12-19T20:32:42Z" hostId="120a7c150d2ad92ce84590d0780c67b9e8be59b7084d0bc292ec0378" name="new-server-test" created="2012-12-19T20:32:40Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="55db7849-8ec4-46dd-8897-492d82282f29">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/55db7849-8ec4-46dd-8897-492d82282f29" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/55db7849-8ec4-46dd-8897-492d82282f29" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/servers-list-resp.json b/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
new file mode 100644
index 000000000..f0042254b
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "120eed64-e7db-409c-b565-38ce2bc90021",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/120eed64-e7db-409c-b565-38ce2bc90021",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/120eed64-e7db-409c-b565-38ce2bc90021",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hide-server-addresses/servers-list-resp.xml b/doc/api_samples/os-hide-server-addresses/servers-list-resp.xml
new file mode 100644
index 000000000..3225a4c36
--- /dev/null
+++ b/doc/api_samples/os-hide-server-addresses/servers-list-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="7c3e9b55-f06c-499b-9d72-266e7806ce5a">
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/7c3e9b55-f06c-499b-9d72-266e7806ce5a" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/7c3e9b55-f06c-499b-9d72-266e7806ce5a" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-reboot.json b/doc/api_samples/os-hosts/host-get-reboot.json
new file mode 100644
index 000000000..a93e48953
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-reboot.json
@@ -0,0 +1,4 @@
+{
+ "host": "066bf157ab50481d8c607cfe584b2230",
+ "power_action": "reboot"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-reboot.xml b/doc/api_samples/os-hosts/host-get-reboot.xml
new file mode 100644
index 000000000..3b3fe27b5
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-reboot.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="ce8c5f8cde4a46ffb01dec0788ad4dfc" power_action="reboot"/> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-resp.json b/doc/api_samples/os-hosts/host-get-resp.json
new file mode 100644
index 000000000..83b5e38c6
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-resp.json
@@ -0,0 +1,31 @@
+{
+ "host": [
+ {
+ "resource": {
+ "cpu": 1,
+ "disk_gb": 1028,
+ "host": "5ca60c6792a1442f9471ff575443f94d",
+ "memory_mb": 8192,
+ "project": "(total)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "5ca60c6792a1442f9471ff575443f94d",
+ "memory_mb": 512,
+ "project": "(used_now)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "5ca60c6792a1442f9471ff575443f94d",
+ "memory_mb": 0,
+ "project": "(used_max)"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-resp.xml b/doc/api_samples/os-hosts/host-get-resp.xml
new file mode 100644
index 000000000..197812c87
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-resp.xml
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host>
+ <resource>
+ <project>(total)</project>
+ <memory_mb>8192</memory_mb>
+ <host>ecf3458ac6bf4a299cc2e0efa740f426</host>
+ <cpu>1</cpu>
+ <disk_gb>1028</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_now)</project>
+ <memory_mb>512</memory_mb>
+ <host>ecf3458ac6bf4a299cc2e0efa740f426</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_max)</project>
+ <memory_mb>0</memory_mb>
+ <host>ecf3458ac6bf4a299cc2e0efa740f426</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+</host> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-shutdown.json b/doc/api_samples/os-hosts/host-get-shutdown.json
new file mode 100644
index 000000000..b584c9044
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-shutdown.json
@@ -0,0 +1,4 @@
+{
+ "host": "d2576862a2144ee6ad37d9e1938460a2",
+ "power_action": "shutdown"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-shutdown.xml b/doc/api_samples/os-hosts/host-get-shutdown.xml
new file mode 100644
index 000000000..4f5f1a758
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-shutdown.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="c82ca6da579440ac930ddee0e6530176" power_action="shutdown"/> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-startup.json b/doc/api_samples/os-hosts/host-get-startup.json
new file mode 100644
index 000000000..d66895ecf
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-startup.json
@@ -0,0 +1,4 @@
+{
+ "host": "57f5de2fa5b44f14974a4f50b9ffcbf8",
+ "power_action": "startup"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-get-startup.xml b/doc/api_samples/os-hosts/host-get-startup.xml
new file mode 100644
index 000000000..8d6959946
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-get-startup.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="7fae168ed18140d5a785ade2ac1bd420" power_action="startup"/> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-put-maintenance-req.json b/doc/api_samples/os-hosts/host-put-maintenance-req.json
new file mode 100644
index 000000000..6accac164
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-put-maintenance-req.json
@@ -0,0 +1,4 @@
+{
+ "status": "enable",
+ "maintenance_mode": "disable"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-put-maintenance-req.xml b/doc/api_samples/os-hosts/host-put-maintenance-req.xml
new file mode 100644
index 000000000..42b995b5d
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-put-maintenance-req.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<updates>
+ <status>enable</status>
+ <maintenance_mode>disable</maintenance_mode>
+</updates> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-put-maintenance-resp.json b/doc/api_samples/os-hosts/host-put-maintenance-resp.json
new file mode 100644
index 000000000..2feb88405
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-put-maintenance-resp.json
@@ -0,0 +1,5 @@
+{
+ "host": "0738dca90a8c43fdadd0be28715520e2",
+ "maintenance_mode": "off_maintenance",
+ "status": "enabled"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/host-put-maintenance-resp.xml b/doc/api_samples/os-hosts/host-put-maintenance-resp.xml
new file mode 100644
index 000000000..c4d165628
--- /dev/null
+++ b/doc/api_samples/os-hosts/host-put-maintenance-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host status="enabled" maintenance_mode="off_maintenance" host="d85f05519b57457c83da18c39fa8e00d"/> \ No newline at end of file
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.json b/doc/api_samples/os-hosts/hosts-list-resp.json
new file mode 100644
index 000000000..0c4126a7e
--- /dev/null
+++ b/doc/api_samples/os-hosts/hosts-list-resp.json
@@ -0,0 +1,34 @@
+{
+ "hosts": [
+ {
+ "host_name": "787f4f6dda1b409bb8b2f9082349690e",
+ "service": "compute",
+ "zone": "nova"
+ },
+ {
+ "host_name": "a98b433151084aee8b1a986e28823b36",
+ "service": "cert",
+ "zone": "internal"
+ },
+ {
+ "host_name": "c56158d13a884a87abf9171efb7de9d8",
+ "service": "network",
+ "zone": "internal"
+ },
+ {
+ "host_name": "81d5cdcda0014918b3ebd3503a2e5c9a",
+ "service": "scheduler",
+ "zone": "internal"
+ },
+ {
+ "host_name": "6e48bfe1a3304b7b86154326328750ae",
+ "service": "conductor",
+ "zone": "internal"
+ },
+ {
+ "host_name": "39f55087a1024d1380755951c945ca69",
+ "service": "cells",
+ "zone": "internal"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.xml b/doc/api_samples/os-hosts/hosts-list-resp.xml
new file mode 100644
index 000000000..9a99c577a
--- /dev/null
+++ b/doc/api_samples/os-hosts/hosts-list-resp.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hosts>
+ <host host_name="461697a871354212908d82bbb0f9f5ee" service="compute"/>
+ <host host_name="272ab5d262994ebdaf228935c8ecf57e" service="cert"/>
+ <host host_name="2d1bdd671b5d41fd89dec74be5770c63" service="network"/>
+ <host host_name="7c2dd5ecb7494dd1bf4240b7f7f9bf3a" service="scheduler"/>
+ <host host_name="f9c273d8e03141a2a01def0ad18e5be4" service="conductor"/>
+ <host host_name="2b893569cd824b979bd80a2c94570a1f" service="cells"/>
+</hosts>
diff --git a/doc/api_samples/os-networks-associate/network-associate-host-req.json b/doc/api_samples/os-networks-associate/network-associate-host-req.json
new file mode 100644
index 000000000..a6487211e
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-associate-host-req.json
@@ -0,0 +1,3 @@
+{
+ "associate_host": "testHost"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-associate-host-req.xml b/doc/api_samples/os-networks-associate/network-associate-host-req.xml
new file mode 100644
index 000000000..3221be61d
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-associate-host-req.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<associate_host>testHost</associate_host> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-host-req.json b/doc/api_samples/os-networks-associate/network-disassociate-host-req.json
new file mode 100644
index 000000000..d6c5419fd
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-host-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml
new file mode 100644
index 000000000..3c2cc0d84
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-host-req.xml
@@ -0,0 +1 @@
+<disassociate_host/> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-project-req.json b/doc/api_samples/os-networks-associate/network-disassociate-project-req.json
new file mode 100644
index 000000000..6c0e46730
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-project-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml
new file mode 100644
index 000000000..be94feb9f
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-project-req.xml
@@ -0,0 +1 @@
+<disassociate_project/> \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-req.json b/doc/api_samples/os-networks-associate/network-disassociate-req.json
new file mode 100644
index 000000000..66ab7cef0
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-req.json
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks-associate/network-disassociate-req.xml b/doc/api_samples/os-networks-associate/network-disassociate-req.xml
new file mode 100644
index 000000000..bcad8e0a8
--- /dev/null
+++ b/doc/api_samples/os-networks-associate/network-disassociate-req.xml
@@ -0,0 +1 @@
+<disassociate/> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
new file mode 100644
index 000000000..e4d0a5b47
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "quota_class_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "test_class",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml
new file mode 100644
index 000000000..74532bc98
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set id="test_class">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json
new file mode 100644
index 000000000..f074c829f
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json
@@ -0,0 +1,15 @@
+{
+ "quota_class_set": {
+ "instances": 50,
+ "cores": 50,
+ "ram": 51200,
+ "floating_ips": 10,
+ "metadata_items": 128,
+ "injected_files": 5,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "security_groups": 10,
+ "security_group_rules": 20,
+ "key_pairs": 100
+ }
+}
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml
new file mode 100644
index 000000000..d14785482
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
new file mode 100644
index 000000000..99a11f4ff
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
@@ -0,0 +1,15 @@
+{
+ "quota_class_set": {
+ "cores": 50,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 50,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml
new file mode 100644
index 000000000..44c658a41
--- /dev/null
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
new file mode 100644
index 000000000..ee1f6a397
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
new file mode 100644
index 000000000..6a39c8506
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
new file mode 100644
index 000000000..ee1f6a397
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
new file mode 100644
index 000000000..6a39c8506
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.json b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
new file mode 100644
index 000000000..1f12caa04
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.xml b/doc/api_samples/os-quota-sets/quotas-update-post-req.xml
new file mode 100644
index 000000000..4bb7b3a47
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set> \ No newline at end of file
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
new file mode 100644
index 000000000..c16dc6bb5
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
@@ -0,0 +1,15 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
new file mode 100644
index 000000000..126c3fced
--- /dev/null
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/doc/api_samples/os-server-password/get-password-resp.json b/doc/api_samples/os-server-password/get-password-resp.json
new file mode 100644
index 000000000..4becaf292
--- /dev/null
+++ b/doc/api_samples/os-server-password/get-password-resp.json
@@ -0,0 +1,3 @@
+{
+ "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg=="
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/get-password-resp.xml b/doc/api_samples/os-server-password/get-password-resp.xml
new file mode 100644
index 000000000..64b46a571
--- /dev/null
+++ b/doc/api_samples/os-server-password/get-password-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<password>xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==</password> \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-req.json b/doc/api_samples/os-server-password/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-req.xml b/doc/api_samples/os-server-password/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-resp.json b/doc/api_samples/os-server-password/server-post-resp.json
new file mode 100644
index 000000000..0f477be49
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "78AtBtuxTqZV",
+ "id": "66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/66fd64e1-de18-4506-bfb6-b5e73ef78a43",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-password/server-post-resp.xml b/doc/api_samples/os-server-password/server-post-resp.xml
new file mode 100644
index 000000000..cac50bc9b
--- /dev/null
+++ b/doc/api_samples/os-server-password/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="b68e3354-0b1a-4e92-a664-8b332cff27f5" adminPass="sLV7uLzmgoHu">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/b68e3354-0b1a-4e92-a664-8b332cff27f5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/b68e3354-0b1a-4e92-a664-8b332cff27f5" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-req.json b/doc/api_samples/os-server-start-stop/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-req.xml b/doc/api_samples/os-server-start-stop/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-resp.json b/doc/api_samples/os-server-start-stop/server-post-resp.json
new file mode 100644
index 000000000..09d9fb612
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "xrDLoBeMD28B",
+ "id": "3f69b6bd-00a8-4636-96ee-650093624304",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-resp.xml b/doc/api_samples/os-server-start-stop/server-post-resp.xml
new file mode 100644
index 000000000..7f84ac03d
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" adminPass="uF9wWxBh3mWL">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server_start_stop.xml b/doc/api_samples/os-server-start-stop/server_start_stop.xml
index 69aac4af4..974e415a5 100644
--- a/doc/api_samples/os-server-start-stop/server_start_stop.xml
+++ b/doc/api_samples/os-server-start-stop/server_start_stop.xml
@@ -1,2 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
-<stop/>
+<os-stop/> \ No newline at end of file
diff --git a/doc/api_samples/os-tenant-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json
new file mode 100644
index 000000000..b857e8112
--- /dev/null
+++ b/doc/api_samples/os-tenant-networks/networks-list-res.json
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd20",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-tenant-networks/networks-post-res.json b/doc/api_samples/os-tenant-networks/networks-post-res.json
new file mode 100644
index 000000000..536a9a0a4
--- /dev/null
+++ b/doc/api_samples/os-tenant-networks/networks-post-res.json
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "label": "public"
+ }
+}
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.json b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
index e24a1b8bb..c5593b7e7 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.json
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
@@ -12,35 +12,31 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
- "totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
- "totalVolumeGigabytesUsed": 0,
- "totalVolumesUsed": 0
+ "totalFloatingIpsUsed": 0
},
"rate": [
{
"limit": [
{
- "next-available": "2012-10-04T15:38:20Z",
+ "next-available": "2012-11-27T17:24:52Z",
"remaining": 10,
"unit": "MINUTE",
"value": 10,
"verb": "POST"
},
{
- "next-available": "2012-10-04T15:38:20Z",
+ "next-available": "2012-11-27T17:24:52Z",
"remaining": 10,
"unit": "MINUTE",
"value": 10,
"verb": "PUT"
},
{
- "next-available": "2012-10-04T15:38:20Z",
+ "next-available": "2012-11-27T17:24:52Z",
"remaining": 100,
"unit": "MINUTE",
"value": 100,
@@ -53,7 +49,7 @@
{
"limit": [
{
- "next-available": "2012-10-04T15:38:20Z",
+ "next-available": "2012-11-27T17:24:52Z",
"remaining": 50,
"unit": "DAY",
"value": 50,
@@ -66,7 +62,7 @@
{
"limit": [
{
- "next-available": "2012-10-04T15:38:20Z",
+ "next-available": "2012-11-27T17:24:52Z",
"remaining": 3,
"unit": "MINUTE",
"value": 3,
@@ -75,6 +71,19 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "2012-11-27T17:24:52Z",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
index ae14c8158..c2b0572e5 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
@@ -2,37 +2,36 @@
<limits xmlns:os-used-limits="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
<rates>
<rate regex=".*" uri="*">
- <limit next-available="2012-10-04T15:38:36Z" unit="MINUTE" verb="POST" remaining="10" value="10"/>
- <limit next-available="2012-10-04T15:38:36Z" unit="MINUTE" verb="PUT" remaining="10" value="10"/>
- <limit next-available="2012-10-04T15:38:36Z" unit="MINUTE" verb="DELETE" remaining="100" value="100"/>
+ <limit next-available="2012-11-27T17:24:53Z" unit="MINUTE" verb="POST" remaining="10" value="10"/>
+ <limit next-available="2012-11-27T17:24:53Z" unit="MINUTE" verb="PUT" remaining="10" value="10"/>
+ <limit next-available="2012-11-27T17:24:53Z" unit="MINUTE" verb="DELETE" remaining="100" value="100"/>
</rate>
<rate regex="^/servers" uri="*/servers">
- <limit next-available="2012-10-04T15:38:36Z" unit="DAY" verb="POST" remaining="50" value="50"/>
+ <limit next-available="2012-11-27T17:24:53Z" unit="DAY" verb="POST" remaining="50" value="50"/>
</rate>
<rate regex=".*changes-since.*" uri="*changes-since*">
- <limit next-available="2012-10-04T15:38:36Z" unit="MINUTE" verb="GET" remaining="3" value="3"/>
+ <limit next-available="2012-11-27T17:24:53Z" unit="MINUTE" verb="GET" remaining="3" value="3"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="2012-11-27T17:24:53Z" unit="HOUR" verb="GET" remaining="12" value="12"/>
</rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
- <limit name="maxTotalInstances" value="10"/>
<limit name="maxPersonality" value="5"/>
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
- <limit name="totalVolumesUsed" value="0"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
- <limit name="totalCoresUsed" value="0"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="totalRAMUsed" value="0"/>
<limit name="totalInstancesUsed" value="0"/>
<limit name="maxSecurityGroups" value="10"/>
- <limit name="totalVolumeGigabytesUsed" value="0"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="totalKeyPairsUsed" value="0"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="totalCoresUsed" value="0"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits> \ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b52bcad0d..804080e79 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -154,7 +154,9 @@ man_pages = [
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
- [u'OpenStack'], 1)
+ [u'OpenStack'], 1),
+ ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
+ [u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index 4187f9278..4eb695963 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -66,7 +66,7 @@ On Ubuntu::
On Ubuntu Precise (12.04) you may also need to add the following packages::
- sudo apt-get install build-dep python-mysqldb
+ sudo apt-get build-dep python-mysqldb
On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux)::
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 62909a6e1..63ed95c82 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -32,10 +32,16 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
image properties contained in the instance.
* |AvailabilityZoneFilter| - filters hosts by availability zone. It passes
hosts matching the availability zone specified in the instance properties.
-* |ComputeCapabilityFilter| - checks that the capabilities provided by the
+* |ComputeCapabilitiesFilter| - checks that the capabilities provided by the
host compute service satisfy any extra specifications associated with the
- instance type (that have no scope, see |TrustedFilter| for details). It
- passes hosts that can create the specified instance type.
+ instance type. It passes hosts that can create the specified instance type.
+
+ The extra specifications can have a scope at the beginning of the key string
+ of a key/value pair. The scope format is "scope:key" and can be nested,
+ i.e. key_string := scope:key_string. Example like "capabilities:cpu_info:
+ features" is valid scope format. A key string without any ':' is non-scope
+ format. Each filter defines its valid scope, and not all filters accept
+ non-scope format.
The extra specifications can have an operator at the beginning of the value
string of a key/value pair. If there is no operator specified, then a
@@ -63,7 +69,7 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
satisfies any extra specifications associated with the instance type (that
have no scope). It passes hosts that can create the specified instance type.
The extra specifications can have the same operators as
- |ComputeCapabilityFilter|.
+ |ComputeCapabilitiesFilter|.
* |ComputeFilter| - passes all hosts that are operational and enabled.
* |CoreFilter| - filters based on CPU core utilization. It passes hosts with
sufficient number of CPU cores.
@@ -128,11 +134,14 @@ hypervisor_type=qemu`
Only hosts that satisfy these requirements will pass the
|ImagePropertiesFilter|.
-|ComputeCapabilitesFilter| checks if the host satisfies any 'extra specs'
-specified on the instance type. The 'extra specs' can contain key/value pairs,
-and the |ComputeCapabilitiesFilter| will only pass hosts whose capabilities
-satisfy the requested specifications. All hosts are passed if no 'extra specs'
-are specified.
+|ComputeCapabilitiesFilter| checks if the host satisfies any 'extra specs'
+specified on the instance type. The 'extra specs' can contain key/value pairs.
+The key for the filter is either non-scope format (i.e. no ':' contained), or
+scope format in capabilities scope (i.e. 'capabilities:xxx:yyy'). One example
+of capabilities scope is "capabilities:cpu_info:features", which will match
+host's cpu features capabilities. The |ComputeCapabilitiesFilter| will only
+pass hosts whose capabilities satisfy the requested specifications. All hosts
+are passed if no 'extra specs' are specified.
|ComputeFilter| is quite simple and passes any host whose compute service is
enabled and operational.
@@ -179,9 +188,9 @@ The |RetryFilter| filters hosts that have already been attempted for scheduling.
It only passes hosts that have not been previously attempted.
The |TrustedFilter| filters hosts based on their trust. Only passes hosts
-that match the trust requested in the `extra_specs' for the flavor. The key
-for this filter is `trust:trusted_host', where `trust' is the scope of the
-key and `trusted_host' is the actual key value'.
+that match the trust requested in the `extra_specs' for the flavor. The key
+for this filter must be scope format as `trust:trusted_host', where `trust'
+is the scope of the key and `trusted_host' is the actual key value.
The value of this pair (`trusted'/`untrusted') must match the
integrity of a host (obtained from the Attestation service) before it is
passed by the |TrustedFilter|.
@@ -198,11 +207,11 @@ The default values for these settings in nova.conf are:
::
--scheduler_available_filters=nova.scheduler.filters.standard_filters
- --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilityFilter,ImagePropertiesFilter
+ --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
With this configuration, all filters in `nova.scheduler.filters`
would be available, and by default the |RamFilter|, |ComputeFilter|,
-|AvailabilityZoneFilter|, |ComputeCapabilityFilter|, and
+|AvailabilityZoneFilter|, |ComputeCapabilitiesFilter|, and
|ImagePropertiesFilter| would be used.
If you want to create **your own filter** you just need to inherit from
@@ -333,3 +342,4 @@ in :mod:`nova.tests.scheduler`.
.. |TrustedFilter| replace:: :class:`TrustedFilter <nova.scheduler.filters.trusted_filter.TrustedFilter>`
.. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter <nova.scheduler.filters.type_filter.TypeAffinityFilter>`
.. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter <nova.scheduler.filters.type_filter.AggregateTypeAffinityFilter>`
+.. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter <nova.scheduler.filters.aggregate_instance_extra_specs.AggregateInstanceExtraSpecsFilter>`
diff --git a/doc/source/devref/hooks.rst b/doc/source/devref/hooks.rst
new file mode 100644
index 000000000..a53e37352
--- /dev/null
+++ b/doc/source/devref/hooks.rst
@@ -0,0 +1,57 @@
+Hooks
+=====
+
+Hooks provide a mechanism to extend Nova with custom code through a plugin
+mechanism.
+
+Named hooks are added to nova code via a decorator that will lazily load
+plugin code matching the name. The loading works via setuptools
+`entry points`_.
+
+.. _`entry points`: http://packages.python.org/distribute/pkg_resources.html#entry-points
+
+What are hooks good for?
+------------------------
+
+Hooks are good for anchoring your custom code to Nova internal APIs.
+
+What are hooks NOT good for?
+----------------------------
+
+Hooks should not be used when API stability is a key factor. Internal APIs may
+change. Consider using a notification driver if this is important to you.
+
+Declaring hooks in the Nova codebase
+------------------------------------
+
+The following example declares a *resize_hook* around the *resize_instance* method::
+
+ from nova import hooks
+
+ @hooks.add_hook("resize_hook")
+ def resize_instance(self, context, instance, a=1, b=2):
+ ...
+
+Hook objects can now be attached via entry points to the *resize_hook*.
+
+Adding hook object code
+-----------------------
+
+1. Setup a Python package with a setup.py file.
+2. Add the following to the setup.py setup call::
+
+ entry_points = [
+ 'nova.hooks': [
+ 'resize_hook': your_package.hooks.YourHookClass,
+ ]
+ ]
+
+3. *YourHookClass* should be an object with *pre* and/or *post* methods::
+
+ class YourHookClass(object):
+
+ def pre(self, *args, **kwargs):
+ ....
+
+ def post(self, rv, *args, **kwargs):
+ ....
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 239848c62..0b7883f7b 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -43,6 +43,7 @@ Background Concepts for Nova
filter_scheduler
multinic
rpc
+ hooks
Other Resources
---------------
diff --git a/doc/source/man/nova-baremetal-deploy-helper.rst b/doc/source/man/nova-baremetal-deploy-helper.rst
new file mode 100644
index 000000000..106cb85e7
--- /dev/null
+++ b/doc/source/man/nova-baremetal-deploy-helper.rst
@@ -0,0 +1,52 @@
+============================
+nova-baremetal-deploy-helper
+============================
+
+--------------------------------------------------------------------
+Writes images to a bare-metal node and switches it to instance-mode
+--------------------------------------------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-10-17
+:Copyright: OpenStack LLC
+:Version: 2013.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-baremetal-deploy-helper
+
+DESCRIPTION
+===========
+
+This is a service which should run on nova-compute host when using the
+baremetal driver. During a baremetal node's first boot,
+nova-baremetal-deploy-helper works in conjunction with diskimage-builder's
+"deploy" ramdisk to write an image from glance onto the baremetal node's disks
+using iSCSI. After that is complete, nova-baremetal-deploy-helper switches the
+PXE config to reference the kernel and ramdisk which correspond to the running
+image.
+
+OPTIONS
+=======
+
+ **General options**
+
+FILES
+========
+
+* /etc/nova/nova.conf
+* /etc/nova/rootwrap.conf
+* /etc/nova/rootwrap.d/
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+
+BUGS
+====
+
+* Nova is maintained in Launchpad so you can view current bugs at `OpenStack Nova <https://bugs.launchpad.net/nova>`__
diff --git a/doc/source/man/nova-baremetal-manage.rst b/doc/source/man/nova-baremetal-manage.rst
new file mode 100644
index 000000000..1fab368e5
--- /dev/null
+++ b/doc/source/man/nova-baremetal-manage.rst
@@ -0,0 +1,67 @@
+=====================
+nova-baremetal-manage
+=====================
+
+------------------------------------------------------
+Manage bare-metal DB in OpenStack Nova
+------------------------------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-10-17
+:Copyright: OpenStack LLC
+:Version: 2013.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-baremetal-manage <category> <action> [<args>]
+
+DESCRIPTION
+===========
+
+nova-baremetal-manage manages bare-metal DB schema.
+
+OPTIONS
+=======
+
+The standard pattern for executing a nova-baremetal-manage command is:
+``nova-baremetal-manage <category> <command> [<args>]``
+
+Run without arguments to see a list of available command categories:
+``nova-baremetal-manage``
+
+Categories are db. Detailed descriptions are below.
+
+You can also run with a category argument such as "db" to see a list of all commands in that category:
+``nova-baremetal-manage db``
+
+These sections describe the available categories and arguments for nova-baremetal-manage.
+
+Bare-Metal DB
+~~~~~~~~~~~~~
+
+``nova-baremetal-manage db version``
+
+ Print the current database version.
+
+``nova-baremetal-manage db sync``
+
+ Sync the database up to the most recent version. This is the standard way to create the db as well.
+
+FILES
+========
+
+/etc/nova/nova.conf: get location of bare-metal DB
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+
+BUGS
+====
+
+* Nova is maintained in Launchpad so you can view current bugs at `OpenStack Nova <https://bugs.launchpad.net/nova>`__
+
diff --git a/doc/source/man/nova-cert.rst b/doc/source/man/nova-cert.rst
index ea176a4cd..f8c6d0c2d 100644
--- a/doc/source/man/nova-cert.rst
+++ b/doc/source/man/nova-cert.rst
@@ -21,7 +21,7 @@ SYNOPSIS
DESCRIPTION
===========
-nova-cert is a server daemon that serves the Nova Cert service for X509 certificates.
+nova-cert is a server daemon that serves the Nova Cert service for X509 certificates. Used to generate certificates for euca-bundle-image. Only needed for EC2 API.
OPTIONS
=======
diff --git a/doc/source/man/nova-conductor.rst b/doc/source/man/nova-conductor.rst
new file mode 100644
index 000000000..7a32730e1
--- /dev/null
+++ b/doc/source/man/nova-conductor.rst
@@ -0,0 +1,45 @@
+==============
+nova-conductor
+==============
+
+--------------------------------
+Server for the Nova Conductor
+--------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-11-16
+:Copyright: OpenStack LLC
+:Version: 2012.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-conductor [options]
+
+DESCRIPTION
+===========
+
+nova-conductor is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for Nova.
+
+OPTIONS
+=======
+
+ **General options**
+
+FILES
+========
+
+* /etc/nova/nova.conf
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+* `OpenStack Nova <http://nova.openstack.org>`__
+
+BUGS
+====
+
+* Nova is maintained in Launchpad so you can view current bugs at `OpenStack Nova <https://bugs.launchpad.net/nova>`__
diff --git a/doc/source/man/nova-novncproxy.rst b/doc/source/man/nova-novncproxy.rst
index 92371a1a1..5302fd063 100644
--- a/doc/source/man/nova-novncproxy.rst
+++ b/doc/source/man/nova-novncproxy.rst
@@ -2,9 +2,9 @@
nova-novncproxy
===============
--------------------------------------------
-Websocket novnc Proxy for OpenStack Nova.
--------------------------------------------
+--------------------------------------------------------
+Websocket novnc Proxy for OpenStack Nova noVNC consoles.
+--------------------------------------------------------
:Author: openstack@lists.launchpad.net
:Date: 2012-09-27
@@ -21,7 +21,8 @@ SYNOPSIS
DESCRIPTION
===========
-Websocket proxy that is compatible with OpenStack Nova.
+Websocket proxy that is compatible with OpenStack Nova
+noVNC consoles.
OPTIONS
=======
@@ -40,7 +41,6 @@ SEE ALSO
========
* `OpenStack Nova <http://nova.openstack.org>`__
-* `OpenStack Nova <http://nova.openstack.org>`__
BUGS
====
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 3970974c0..85603fe59 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -117,7 +117,7 @@ paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 2580fafc9..77133d988 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1,2022 +1,2521 @@
-####################
-# nova.conf sample #
-####################
-
[DEFAULT]
-######## defined in nova.openstack.common.cfg:CommonConfigOpts ########
+#
+# Options defined in nova.openstack.common.cfg:CommonConfigOpts
+#
+
+# Print debugging output (boolean value)
+#debug=false
+
+# Print more verbose output (boolean value)
+#verbose=false
+
+# If this option is specified, the logging configuration file
+# specified is used and overrides any other logging options
+# specified. Please see the Python logging module
+# documentation for details on logging configuration files.
+# (string value)
+#log_config=<None>
+
+# A logging.Formatter log message format string which may use
+# any of the available logging.LogRecord attributes. Default:
+# %(default)s (string value)
+#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If not set,
+# logging will go to stdout. (string value)
+#log_file=<None>
+
+# (Optional) The directory to keep log files in (will be
+# prepended to --log-file) (string value)
+#log_dir=<None>
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+# syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# availability_zone to show internal services under (string
+# value)
+#internal_service_availability_zone=internal
+
+# default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.manager
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.netconf
+#
+
+# ip address of this host (string value)
+#my_ip=10.0.0.1
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# use ipv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are False for no
+# notifications, True for notifications on any instance
+# changes. (boolean value)
+#notify_on_any_change=false
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+#notify_on_state_change=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=$pybasedir/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in nova.policy
+#
+
+# JSON file representing policy (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found (string value)
+#policy_default_rule=default
+
+
+#
+# Options defined in nova.quota
+#
+
+# number of instances allowed per project (integer value)
+#quota_instances=10
+
+# number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# megabytes of instance ram allowed per project (integer
+# value)
+#quota_ram=51200
+
+# number of floating ips allowed per project (integer value)
+#quota_floating_ips=10
+
+# number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# number of bytes allowed per injected file path (integer
+# value)
+#quota_injected_file_path_bytes=255
+
+# number of security groups per project (integer value)
+#quota_security_groups=10
+
+# number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# number of key pairs per user (integer value)
+#quota_key_pairs=100
-# debug=false
-#### (BoolOpt) Print debugging output
+# number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
-# verbose=false
-#### (BoolOpt) Print more verbose output
+# count of reservations until usage is refreshed (integer
+# value)
+#until_refresh=0
-# log_config=<None>
-#### (StrOpt) If this option is specified, the logging configuration file
-#### specified is used and overrides any other logging options
-#### specified. Please see the Python logging module
-#### documentation for details on logging configuration files.
+# number of seconds between subsequent usage refreshes
+# (integer value)
+#max_age=0
-# log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
-#### (StrOpt) A logging.Formatter log message format string which may use
-#### any of the available logging.LogRecord attributes. Default:
-#### %default
+# default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
-# log_date_format=%Y-%m-%d %H:%M:%S
-#### (StrOpt) Format string for %(asctime)s in log records. Default:
-#### %default
-# log_file=<None>
-#### (StrOpt) (Optional) Name of log file to output to. If not set,
-#### logging will go to stdout.
+#
+# Options defined in nova.service
+#
-# log_dir=<None>
-#### (StrOpt) (Optional) The directory to keep log files in (will be
-#### prepended to --logfile)
+# seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
-# use_syslog=false
-#### (BoolOpt) Use syslog for logging.
+# enable periodic tasks (boolean value)
+#periodic_enable=true
-# syslog_log_facility=LOG_USER
-#### (StrOpt) syslog facility to receive log lines
+# range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+# a list of APIs to enable by default (list value)
+#enabled_apis=ec2,osapi_compute,metadata
-######## defined in nova.crypto ########
+# IP address for EC2 API to listen (string value)
+#ec2_listen=0.0.0.0
-# ca_file=cacert.pem
-#### (StrOpt) Filename of root CA
+# port for ec2 api to listen (integer value)
+#ec2_listen_port=8773
-# key_file=private/cakey.pem
-#### (StrOpt) Filename of private key
+# Number of workers for EC2 API service (integer value)
+#ec2_workers=<None>
-# crl_file=crl.pem
-#### (StrOpt) Filename of root Certificate Revocation List
+# IP address for OpenStack API to listen (string value)
+#osapi_compute_listen=0.0.0.0
-# keys_path=$state_path/keys
-#### (StrOpt) Where we keep our keys
+# list port for osapi compute (integer value)
+#osapi_compute_listen_port=8774
-# ca_path=$state_path/CA
-#### (StrOpt) Where we keep our root CA
+# Number of workers for OpenStack API service (integer value)
+#osapi_compute_workers=<None>
-# use_project_ca=false
-#### (BoolOpt) Should we use a CA for each project?
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
-# user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
-#### (StrOpt) Subject for certificate for users, %s for project, user,
-#### timestamp
+# IP address for metadata api to listen (string value)
+#metadata_listen=0.0.0.0
-# project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
-#### (StrOpt) Subject for certificate for projects, %s for project,
-#### timestamp
+# port for metadata api to listen (integer value)
+#metadata_listen_port=8775
+# Number of workers for metadata service (integer value)
+#metadata_workers=<None>
-######## defined in nova.flags ########
+# full class name for the Manager for compute (string value)
+#compute_manager=nova.compute.manager.ComputeManager
-# sql_connection=sqlite:///$state_path/$sqlite_db
-#### (StrOpt) The SQLAlchemy connection string used to connect to the
-#### database
+# full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
-# api_paste_config=api-paste.ini
-#### (StrOpt) File name for the paste.deploy config for nova-api
+# full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
-# pybasedir=/usr/lib/python/site-packages
-#### (StrOpt) Directory where the nova python module is installed
+# full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.VlanManager
-# bindir=$pybasedir/bin
-#### (StrOpt) Directory where nova binaries are installed
+# full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
-# state_path=$pybasedir
-#### (StrOpt) Top-level directory for maintaining nova's state
+# maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
-# lock_path=$pybasedir
-#### (StrOpt) Directory to use for lock files
-# fake_network=false
-#### (BoolOpt) If passed, use fake network devices and addresses
+#
+# Options defined in nova.test
+#
-# sql_connection_debug=0
-#### (IntOpt) Verbosity of SQL debugging information. 0=None,
-#### 100=Everything
+# File name of clean sqlite db (string value)
+#sqlite_clean_db=clean.sqlite
-# sql_connection_trace=false
-#### (BoolOpt) Add python stack traces to SQL as comment strings
-# my_ip=10.0.0.1
-#### (StrOpt) ip address of this host
+#
+# Options defined in nova.utils
+#
-# region_list=
-#### (ListOpt) list of region=fqdn pairs separated by commas
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
-# aws_access_key_id=admin
-#### (StrOpt) AWS Access ID
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator
-# aws_secret_access_key=admin
-#### (StrOpt) AWS Access Key
+# Length of generated instance admin passwords (integer value)
+#password_length=12
-# glance_host=$my_ip
-#### (StrOpt) default glance hostname or ip
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
-# glance_port=9292
-#### (IntOpt) default glance port
+# time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+#instance_usage_audit_period=month
-# glance_api_servers=$glance_host:$glance_port
-#### (ListOpt) A list of the glance api servers available to nova. Prefix
-#### with https:// for ssl-based glance api servers.
-#### ([hostname|ip]:port)
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/nova/rootwrap.conf
-# glance_api_insecure=false
-#### (BoolOpt) Allow to perform insecure SSL (https) requests to glance
-# glance_num_retries=0
-#### (IntOpt) Number retries when downloading an image from glance
+#
+# Options defined in nova.wsgi
+#
-# s3_port=3333
-#### (IntOpt) port used when accessing the s3 api
+# File name for the paste.deploy config for nova-api (string
+# value)
+#api_paste_config=api-paste.ini
-# s3_host=$my_ip
-#### (StrOpt) hostname or ip for openstack to use when accessing the s3
-#### api
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
-# cert_topic=cert
-#### (StrOpt) the topic cert nodes listen on
-# compute_topic=compute
-#### (StrOpt) the topic compute nodes listen on
+#
+# Options defined in nova.api.auth
+#
-# console_topic=console
-#### (StrOpt) the topic console proxy nodes listen on
+# whether to rate limit the api (boolean value)
+#api_rate_limit=true
-# scheduler_topic=scheduler
-#### (StrOpt) the topic scheduler nodes listen on
+# The strategy to use for auth: noauth or keystone. (string
+# value)
+#auth_strategy=noauth
-# volume_topic=volume
-#### (StrOpt) the topic volume nodes listen on
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
-# network_topic=network
-#### (StrOpt) the topic network nodes listen on
-# api_rate_limit=true
-#### (BoolOpt) whether to rate limit the api
+#
+# Options defined in nova.api.ec2
+#
-# enabled_apis=ec2,osapi_compute,osapi_volume,metadata
-#### (ListOpt) a list of APIs to enable by default
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
-# ec2_host=$my_ip
-#### (StrOpt) the ip of the ec2 api server
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
-# ec2_dmz_host=$my_ip
-#### (StrOpt) the internal ip of the ec2 api server
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
-# ec2_port=8773
-#### (IntOpt) the port of the ec2 api server
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
-# ec2_scheme=http
-#### (StrOpt) the protocol to use when connecting to the ec2 api server
-#### (http, https)
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
-# ec2_path=/services/Cloud
-#### (StrOpt) the path prefix used to call the ec2 api server
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
-# osapi_compute_ext_list=
-#### (ListOpt) Specify list of extensions to load when using
-#### osapi_compute_extension option with
-#### nova.api.openstack.compute.contrib.select_extensions
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
-# osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
-#### (MultiStrOpt) osapi compute extension to load
-# osapi_volume_ext_list=
-#### (ListOpt) Specify list of extensions to load when using
-#### osapi_volume_extension option with
-#### nova.api.openstack.volume.contrib.select_extensions
+#
+# Options defined in nova.api.ec2.cloud
+#
-# osapi_volume_extension=nova.api.openstack.volume.contrib.standard_extensions
-#### (MultiStrOpt) osapi volume extension to load
+# the ip of the ec2 api server (string value)
+#ec2_host=$my_ip
-# osapi_path=/v1.1/
-#### (StrOpt) the path prefix used to call the openstack api server
+# the internal ip of the ec2 api server (string value)
+#ec2_dmz_host=$my_ip
-# osapi_compute_link_prefix=<None>
-#### (StrOpt) Base URL that will be presented to users in links to the
-#### OpenStack Compute API
+# the port of the ec2 api server (integer value)
+#ec2_port=8773
-# osapi_glance_link_prefix=<None>
-#### (StrOpt) Base URL that will be presented to users in links to glance
-#### resources
+# the protocol to use when connecting to the ec2 api server
+# (http, https) (string value)
+#ec2_scheme=http
-# osapi_max_limit=1000
-#### (IntOpt) the maximum number of items returned in a single response
-#### from a collection resource
+# the path prefix used to call the ec2 api server (string
+# value)
+#ec2_path=/services/Cloud
-# metadata_host=$my_ip
-#### (StrOpt) the ip for the metadata api server
+# list of region=fqdn pairs separated by commas (list value)
+#region_list=
-# metadata_port=8775
-#### (IntOpt) the port for the metadata api port
-# default_image=ami-11111
-#### (StrOpt) default image to use, testing only
+#
+# Options defined in nova.api.metadata.base
+#
-# default_instance_type=m1.small
-#### (StrOpt) default instance type to use, testing only
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
-# null_kernel=nokernel
-#### (StrOpt) kernel image that indicates not to use a kernel, but to use
-#### a raw disk image instead
-# vpn_image_id=0
-#### (StrOpt) image id used when starting up a cloudpipe vpn server
+#
+# Options defined in nova.api.metadata.handler
+#
-# vpn_key_suffix=-vpn
-#### (StrOpt) Suffix to add to project name for vpn key and secgroups
+# Set flag to indicate Quantum will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_quantum_metadata_proxy=false
-# sqlite_db=nova.sqlite
-#### (StrOpt) the filename to use with sqlite
+# Shared secret to validate proxied Quantum metadata requests
+# (string value)
+#quantum_metadata_proxy_shared_secret=
-# sqlite_synchronous=true
-#### (BoolOpt) If passed, use synchronous mode for sqlite
-# sql_idle_timeout=3600
-#### (IntOpt) timeout before idle sql connections are reaped
+#
+# Options defined in nova.api.openstack.common
+#
-# sql_max_retries=10
-#### (IntOpt) maximum db connection retries during startup. (setting -1
-#### implies an infinite retry count)
+# the maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
-# sql_retry_interval=10
-#### (IntOpt) interval between retries of opening a sql connection
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
-# compute_manager=nova.compute.manager.ComputeManager
-#### (StrOpt) full class name for the Manager for compute
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
-# console_manager=nova.console.manager.ConsoleProxyManager
-#### (StrOpt) full class name for the Manager for console proxy
-# cert_manager=nova.cert.manager.CertManager
-#### (StrOpt) full class name for the Manager for cert
+#
+# Options defined in nova.api.openstack.compute
+#
-# instance_dns_manager=nova.network.dns_driver.DNSDriver
-#### (StrOpt) full class name for the DNS Manager for instance IPs
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
-# instance_dns_domain=
-#### (StrOpt) full class name for the DNS Zone for instance IPs
-# floating_ip_dns_manager=nova.network.dns_driver.DNSDriver
-#### (StrOpt) full class name for the DNS Manager for floating IPs
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
-# network_manager=nova.network.manager.VlanManager
-#### (StrOpt) full class name for the Manager for network
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
-# volume_manager=nova.volume.manager.VolumeManager
-#### (StrOpt) full class name for the Manager for volume
-# scheduler_manager=nova.scheduler.manager.SchedulerManager
-#### (StrOpt) full class name for the Manager for scheduler
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
-# host=nova
-#### (StrOpt) Name of this node. This can be an opaque identifier. It is
-#### not necessarily a hostname, FQDN, or IP address. However,
-#### the node name must be valid within an AMQP key, and if using
-#### ZeroMQ, a valid hostname, FQDN, or IP address
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
-# node_availability_zone=nova
-#### (StrOpt) availability zone of this node
-# memcached_servers=<None>
-#### (ListOpt) Memcached servers or None for in process cache.
+#
+# Options defined in nova.api.openstack.compute.contrib.hide_server_addresses
+#
-# instance_usage_audit_period=month
-#### (StrOpt) time period to generate instance usages for. Time period
-#### must be hour, day, month or year
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
-# bandwidth_poll_interval=600
-#### (IntOpt) interval to pull bandwidth usage info
-# start_guests_on_host_boot=false
-#### (BoolOpt) Whether to restart guests when the host reboots
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
-# resume_guests_state_on_host_boot=false
-#### (BoolOpt) Whether to start guests that were running before the host
-#### rebooted
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
-# default_ephemeral_format=<None>
-#### (StrOpt) The default format an ephemeral_volume will be formatted
-#### with on creation.
-# root_helper=sudo
-#### (StrOpt) Deprecated: command to use for running commands as root
+#
+# Options defined in nova.api.openstack.compute.servers
+#
-# rootwrap_config=<None>
-#### (StrOpt) Path to the rootwrap configuration file to use for running
-#### commands as root
+# Allows use of instance password during server creation
+# (boolean value)
+#enable_instance_password=true
-# network_driver=nova.network.linux_net
-#### (StrOpt) Driver to use for network creation
-# use_ipv6=false
-#### (BoolOpt) use ipv6
+#
+# Options defined in nova.api.sizelimit
+#
-# enable_instance_password=true
-#### (BoolOpt) Allows use of instance password during server creation
+# the maximum body size per each osapi request(bytes) (integer
+# value)
+#osapi_max_request_body_size=114688
-# password_length=12
-#### (IntOpt) Length of generated instance admin passwords
-# monkey_patch=false
-#### (BoolOpt) Whether to log monkey patching
+#
+# Options defined in nova.cert.rpcapi
+#
-# monkey_patch_modules=nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator
-#### (ListOpt) List of modules/decorators to monkey patch
+# the topic cert nodes listen on (string value)
+#cert_topic=cert
-# allow_resize_to_same_host=false
-#### (BoolOpt) Allow destination machine to match source for resize. Useful
-#### when testing in single-host environments.
-# reclaim_instance_interval=0
-#### (IntOpt) Interval in seconds for reclaiming deleted instances
+#
+# Options defined in nova.cloudpipe.pipelib
+#
-# zombie_instance_updated_at_window=172800
-#### (IntOpt) Number of seconds zombie instances are cleaned up.
+# image id used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
-# service_down_time=60
-#### (IntOpt) maximum time since last check-in for up service
+# Instance type for vpn instances (string value)
+#vpn_instance_type=m1.tiny
-# default_schedule_zone=<None>
-#### (StrOpt) availability zone to use when user doesn't specify one
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
-# isolated_images=
-#### (ListOpt) Images to run on isolated host
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
-# isolated_hosts=
-#### (ListOpt) Host reserved for specific images
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
-# cache_images=all
-#### (StrOpt) Cache glance images locally. `all` will cache all images,
-#### `some` will only cache images that have the image_property
-#### `cache_in_nova=True`, and `none` turns off caching entirely
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
-# use_cow_images=true
-#### (BoolOpt) Whether to use cow images
-# compute_api_class=nova.compute.api.API
-#### (StrOpt) The full class name of the compute API class to use
+#
+# Options defined in nova.common.memorycache
+#
-# network_api_class=nova.network.api.API
-#### (StrOpt) The full class name of the network API class to use
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
-# volume_api_class=nova.volume.api.API
-#### (StrOpt) The full class name of the volume API class to use
-# security_group_handler=nova.network.sg.NullSecurityGroupHandler
-#### (StrOpt) The full class name of the security group handler class
+#
+# Options defined in nova.compute
+#
-# default_access_ip_network_name=<None>
-#### (StrOpt) Name of network to use to set access ips for instances
+# The full class name of the compute API class to use (string
+# value)
+#compute_api_class=nova.compute.api.API
-# auth_strategy=noauth
-#### (StrOpt) The strategy to use for auth: noauth or keystone.
-# non_inheritable_image_properties=cache_in_nova,instance_uuid,user_id,image_type,backup_type,min_ram,min_disk
-#### (ListOpt) These are image properties which a snapshot should not
-#### inherit from an instance
+#
+# Options defined in nova.compute.api
+#
-# defer_iptables_apply=false
-#### (BoolOpt) Whether to batch up the application of IPTables rules during
-#### a host restart and apply all at the end of the init phase
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+# availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
-######## defined in nova.notifications ########
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
-# notify_on_any_change=false
-#### (BoolOpt) If set, send compute.instance.update notifications on
-#### instance state changes. Valid values are False for no
-#### notifications, True for notifications on any instance
-#### changes.
+# kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
-# notify_on_state_change=<None>
-#### (StrOpt) If set, send compute.instance.update notifications on
-#### instance state changes. Valid values are None for no
-#### notifications, "vm_state" for notifications on VM state
-#### changes, or "vm_and_task_state" for notifications on VM and
-#### task state changes.
+# The full class name of the security group handler class
+# (string value)
+#security_group_handler=nova.network.sg.NullSecurityGroupHandler
+# The full class name of the security API class (string value)
+#security_group_api=nova.compute.api.SecurityGroupAPI
-######## defined in nova.policy ########
-# policy_file=policy.json
-#### (StrOpt) JSON file representing policy
+#
+# Options defined in nova.compute.instance_types
+#
-# policy_default_rule=default
-#### (StrOpt) Rule checked when requested rule is not found
+# default instance type to use, testing only (string value)
+#default_instance_type=m1.small
-######## defined in nova.quota ########
+#
+# Options defined in nova.compute.manager
+#
-# quota_instances=10
-#### (IntOpt) number of instances allowed per project
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
-# quota_cores=20
-#### (IntOpt) number of instance cores allowed per project
+# Name of network to use to set access ips for instances
+# (string value)
+#default_access_ip_network_name=<None>
-# quota_ram=51200
-#### (IntOpt) megabytes of instance ram allowed per project
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
-# quota_volumes=10
-#### (IntOpt) number of volumes allowed per project
+# where instances are stored on disk (string value)
+#instances_path=$state_path/instances
-# quota_gigabytes=1000
-#### (IntOpt) number of volume gigabytes allowed per project
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+#instance_usage_audit=false
-# quota_floating_ips=10
-#### (IntOpt) number of floating ips allowed per project
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
-# quota_metadata_items=128
-#### (IntOpt) number of metadata items allowed per instance
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
-# quota_injected_files=5
-#### (IntOpt) number of injected files allowed
+# interval to pull bandwidth usage info (integer value)
+#bandwidth_poll_interval=600
-# quota_injected_file_content_bytes=10240
-#### (IntOpt) number of bytes allowed per injected file
+# Number of seconds between instance info_cache self healing
+# updates (integer value)
+#heal_instance_info_cache_interval=60
-# quota_injected_file_path_bytes=255
-#### (IntOpt) number of bytes allowed per injected file path
+# Interval in seconds for querying the host status (integer
+# value)
+#host_state_interval=120
-# quota_security_groups=10
-#### (IntOpt) number of security groups per project
+# Number of seconds to wait between runs of the image cache
+# manager (integer value)
+#image_cache_manager_interval=2400
-# quota_security_group_rules=20
-#### (IntOpt) number of security rules per security group
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
-# quota_key_pairs=100
-#### (IntOpt) number of key pairs per user
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
-# reservation_expire=86400
-#### (IntOpt) number of seconds until a reservation expires
+# Action to take if a running deleted instance is
+# detected. Valid options are 'noop', 'log' and 'reap'. Set to
+# 'noop' to disable. (string value)
+#running_deleted_instance_action=log
-# until_refresh=0
-#### (IntOpt) count of reservations until usage is refreshed
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
-# max_age=0
-#### (IntOpt) number of seconds between subsequent usage refreshes
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
-# quota_driver=nova.quota.DbQuotaDriver
-#### (StrOpt) default driver to use for quota checks
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer value)
+#instance_build_timeout=0
-######## defined in nova.service ########
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
-# report_interval=10
-#### (IntOpt) seconds between nodes reporting state to datastore
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
-# periodic_interval=60
-#### (IntOpt) seconds between running periodic tasks
-# periodic_fuzzy_delay=60
-#### (IntOpt) range of seconds to randomly delay when starting the
-#### periodic task scheduler to reduce stampeding. (Disable by
-#### setting to 0)
+#
+# Options defined in nova.compute.resource_tracker
+#
-# ec2_listen=0.0.0.0
-#### (StrOpt) IP address for EC2 API to listen
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
-# ec2_listen_port=8773
-#### (IntOpt) port for ec2 api to listen
+# Amount of memory in MB to reserve for the host (integer
+# value)
+#reserved_host_memory_mb=512
-# ec2_workers=<None>
-#### (IntOpt) Number of workers for EC2 API service
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
-# osapi_compute_listen=0.0.0.0
-#### (StrOpt) IP address for OpenStack API to listen
-# osapi_compute_listen_port=8774
-#### (IntOpt) list port for osapi compute
+#
+# Options defined in nova.compute.rpcapi
+#
-# osapi_compute_workers=<None>
-#### (IntOpt) Number of workers for OpenStack API service
+# the topic compute nodes listen on (string value)
+#compute_topic=compute
-# metadata_manager=nova.api.manager.MetadataManager
-#### (StrOpt) OpenStack metadata service manager
-# metadata_listen=0.0.0.0
-#### (StrOpt) IP address for metadata api to listen
+#
+# Options defined in nova.console.manager
+#
-# metadata_listen_port=8775
-#### (IntOpt) port for metadata api to listen
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
-# metadata_workers=<None>
-#### (IntOpt) Number of workers for metadata service
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
-# osapi_volume_listen=0.0.0.0
-#### (StrOpt) IP address for OpenStack Volume API to listen
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
-# osapi_volume_listen_port=8776
-#### (IntOpt) port for os volume api to listen
-# osapi_volume_workers=<None>
-#### (IntOpt) Number of workers for OpenStack Volume API service
+#
+# Options defined in nova.console.rpcapi
+#
+# the topic console proxy nodes listen on (string value)
+#console_topic=console
-######## defined in nova.test ########
-# sqlite_clean_db=clean.sqlite
-#### (StrOpt) File name of clean sqlite db
+#
+# Options defined in nova.console.vmrc
+#
-# fake_tests=true
-#### (BoolOpt) should we use everything for testing
+# port for VMware VMRC connections (integer value)
+#console_vmrc_port=443
+# number of retries for retrieving VMRC information (integer
+# value)
+#console_vmrc_error_retries=10
-######## defined in nova.api.auth ########
-# use_forwarded_for=false
-#### (BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only
-#### enable this if you have a sanitizing proxy.
+#
+# Options defined in nova.console.xvp
+#
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
-######## defined in nova.api.ec2 ########
+# generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
-# lockout_attempts=5
-#### (IntOpt) Number of failed auths before lockout.
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
-# lockout_minutes=15
-#### (IntOpt) Number of minutes to lockout if triggered.
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
-# lockout_window=15
-#### (IntOpt) Number of minutes for lockout window.
+# port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
-# keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
-#### (StrOpt) URL to get token from ec2 request.
-# ec2_private_dns_show_ip=false
-#### (BoolOpt) Return the IP address as private dns hostname in describe
-#### instances
+#
+# Options defined in nova.consoleauth
+#
-# ec2_strict_validation=true
-#### (BoolOpt) Validate security group names according to EC2 specification
+# the topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
-######## defined in nova.api.metadata.base ########
+#
+# Options defined in nova.consoleauth.manager
+#
-# config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
-#### (StrOpt) List of metadata versions to skip placing into the config
-#### drive
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
-######## defined in nova.api.openstack.compute ########
-# allow_instance_snapshots=true
-#### (BoolOpt) Permit instance snapshot operations.
+#
+# Options defined in nova.db.api
+#
+# The backend to use for db (string value)
+#db_backend=sqlalchemy
-######## defined in nova.api.sizelimit ########
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
-# osapi_max_request_body_size=114688
-#### (IntOpt) the maximum body size per each osapi request(bytes)
+# Template string to be used to generate instance names
+# (string value)
+#instance_name_template=instance-%08x
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
-######## defined in nova.cloudpipe.pipelib ########
-# vpn_instance_type=m1.tiny
-#### (StrOpt) Instance type for vpn instances
+#
+# Options defined in nova.db.base
+#
-# boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
-#### (StrOpt) Template for cloudpipe instance boot script
+# driver to use for database access (string value)
+#db_driver=nova.db
-# dmz_net=10.0.0.0
-#### (StrOpt) Network to push into openvpn config
-# dmz_mask=255.255.255.0
-#### (StrOpt) Netmask to push into openvpn config
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
-######## defined in nova.common.deprecated ########
-# fatal_deprecations=false
-#### (BoolOpt) make deprecations fatal
+#
+# Options defined in nova.db.sqlalchemy.session
+#
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#sql_connection=sqlite:///$state_path/$sqlite_db
-######## defined in nova.common.eventlet_backdoor ########
+# the filename to use with sqlite (string value)
+#sqlite_db=nova.sqlite
-# backdoor_port=<None>
-#### (IntOpt) port for eventlet backdoor to listen
+# timeout before idle sql connections are reaped (integer
+# value)
+#sql_idle_timeout=3600
+# If passed, use synchronous mode for sqlite (boolean value)
+#sqlite_synchronous=true
-######## defined in nova.compute.manager ########
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_min_pool_size=1
-# instances_path=$state_path/instances
-#### (StrOpt) where instances are stored on disk
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_max_pool_size=5
-# base_dir_name=_base
-#### (StrOpt) Where cached images are stored under $instances_path.This is
-#### NOT the full path - just a folder name.For per-compute-host
-#### cached images, set to _base_$my_ip
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#sql_max_retries=10
-# compute_driver=nova.virt.connection.get_connection
-#### (StrOpt) Driver to use for controlling virtualization. Options
-#### include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
-#### fake.FakeDriver, baremetal.BareMetalDriver,
-#### vmwareapi.VMWareESXDriver
+# interval between retries of opening a sql connection
+# (integer value)
+#sql_retry_interval=10
-# console_host=nova
-#### (StrOpt) Console proxy host to use to connect to instances on this
-#### host.
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#sql_max_overflow=<None>
-# live_migration_retry_count=30
-#### (IntOpt) Number of 1 second retries needed in live_migration
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#sql_connection_debug=0
-# reboot_timeout=0
-#### (IntOpt) Automatically hard reboot an instance if it has been stuck
-#### in a rebooting state longer than N seconds. Set to 0 to
-#### disable.
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#sql_connection_trace=false
-# instance_build_timeout=0
-#### (IntOpt) Amount of time in seconds an instance can be in BUILD before
-#### going into ERROR status.Set to 0 to disable.
+# enable the use of eventlet's db_pool for MySQL (boolean
+# value)
+#sql_dbpool_enable=false
-# rescue_timeout=0
-#### (IntOpt) Automatically unrescue an instance after N seconds. Set to 0
-#### to disable.
-# resize_confirm_window=0
-#### (IntOpt) Automatically confirm resizes after N seconds. Set to 0 to
-#### disable.
+#
+# Options defined in nova.image.glance
+#
-# host_state_interval=120
-#### (IntOpt) Interval in seconds for querying the host status
+# default glance hostname or ip (string value)
+#glance_host=$my_ip
-# running_deleted_instance_timeout=0
-#### (IntOpt) Number of seconds after being deleted when a running
-#### instance should be considered eligible for cleanup.
+# default glance port (integer value)
+#glance_port=9292
-# running_deleted_instance_poll_interval=30
-#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
-#### the cleanup task.
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
-# running_deleted_instance_action=log
-#### (StrOpt) Action to take if a running deleted instance is
-#### detected.Valid options are 'noop', 'log' and 'reap'. Set to
-#### 'noop' to disable.
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
-# image_cache_manager_interval=40
-#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
-#### the image cache manager.
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
-# heal_instance_info_cache_interval=60
-#### (IntOpt) Number of seconds between instance info_cache self healing
-#### updates
+# Number of retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
-# instance_usage_audit=false
-#### (BoolOpt) Generate periodic compute.instance.exists notifications
+#
+# Options defined in nova.image.s3
+#
-######## defined in nova.compute.resource_tracker ########
+# parent dir for tempdir used for image decryption (string
+# value)
+#image_decryption_dir=/tmp
-# reserved_host_disk_mb=0
-#### (IntOpt) Amount of disk in MB to reserve for the host
+# hostname or ip for openstack to use when accessing the s3
+# api (string value)
+#s3_host=$my_ip
-# reserved_host_memory_mb=512
-#### (IntOpt) Amount of memory in MB to reserve for the host
+# port used when accessing the s3 api (integer value)
+#s3_port=3333
-# claim_timeout_seconds=600
-#### (IntOpt) How long, in seconds, before a resource claim times out
+# access key to use for s3 server for images (string value)
+#s3_access_key=notchecked
-# compute_stats_class=nova.compute.stats.Stats
-#### (StrOpt) Class that will manage stats for the local compute host
+# secret key to use for s3 server for images (string value)
+#s3_secret_key=notchecked
+# whether to use ssl when talking to s3 (boolean value)
+#s3_use_ssl=false
-######## defined in nova.console.manager ########
+# whether to affix the tenant id to the access key when
+# downloading from s3 (boolean value)
+#s3_affix_tenant=false
-# console_driver=nova.console.xvp.XVPConsoleProxy
-#### (StrOpt) Driver to use for the console proxy
-# stub_compute=false
-#### (BoolOpt) Stub calls to compute worker for tests
+#
+# Options defined in nova.ipv6.api
+#
-# console_public_hostname=nova
-#### (StrOpt) Publicly visible name for this console host
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
-######## defined in nova.console.vmrc ########
+#
+# Options defined in nova.network
+#
-# console_vmrc_port=443
-#### (IntOpt) port for VMware VMRC connections
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
-# console_vmrc_error_retries=10
-#### (IntOpt) number of retries for retrieving VMRC information
+#
+# Options defined in nova.network.driver
+#
-######## defined in nova.console.xvp ########
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
-# console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
-#### (StrOpt) XVP conf template
-# console_xvp_conf=/etc/xvp.conf
-#### (StrOpt) generated XVP conf file
+#
+# Options defined in nova.network.ldapdns
+#
-# console_xvp_pid=/var/run/xvp.pid
-#### (StrOpt) XVP master process pid file
+# URL for ldap server which will store dns entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
-# console_xvp_log=/var/log/xvp.log
-#### (StrOpt) XVP log file
+# user for ldap DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
-# console_xvp_multiplex_port=5900
-#### (IntOpt) port for XVP to multiplex VNC connections on
+# password for ldap DNS (string value)
+#ldap_dns_password=password
+# Hostmaster for ldap dns driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
-######## defined in nova.consoleauth ########
+# DNS Servers for ldap dns driver (multi valued)
+#ldap_dns_servers=dns.example.org
-# consoleauth_topic=consoleauth
-#### (StrOpt) the topic console auth proxy nodes listen on
+# Base DN for DNS entries in ldap (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+# Refresh interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
-######## defined in nova.consoleauth.manager ########
+# Retry interval (in seconds) for ldap dns driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
-# console_token_ttl=600
-#### (IntOpt) How many seconds before deleting tokens
+# Expiry interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
-# consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
-#### (StrOpt) Manager for console auth
+# Minimum interval (in seconds) for ldap dns driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
-######## defined in nova.db.api ########
+#
+# Options defined in nova.network.linux_net
+#
-# db_backend=sqlalchemy
-#### (StrOpt) The backend to use for db
+# location of flagfile for dhcpbridge (string value)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
-# enable_new_services=true
-#### (BoolOpt) Services to be added to the available pool on create
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
-# instance_name_template=instance-%08x
-#### (StrOpt) Template string to be used to generate instance names
+# Interface for public IP addresses (string value)
+#public_interface=eth0
-# volume_name_template=volume-%s
-#### (StrOpt) Template string to be used to generate instance names
+# MTU setting for vlan (string value)
+#network_device_mtu=<None>
-# snapshot_name_template=snapshot-%s
-#### (StrOpt) Template string to be used to generate snapshot names
+# location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
-######## defined in nova.db.base ########
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=120
-# db_driver=nova.db
-#### (StrOpt) driver to use for database access
+# if set, uses specific dns server for dnsmasq (string value)
+#dns_server=<None>
+# A list of dmz range that should be accepted (list value)
+#dmz_cidr=
-######## defined in nova.image.s3 ########
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
-# image_decryption_dir=/tmp
-#### (StrOpt) parent dir for tempdir used for image decryption
+# Driver used to create ethernet devices. (string value)
+#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
-# s3_access_key=notchecked
-#### (StrOpt) access key to use for s3 server for images
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
-# s3_secret_key=notchecked
-#### (StrOpt) secret key to use for s3 server for images
+# send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
-# s3_use_ssl=false
-#### (BoolOpt) whether to use ssl when talking to s3
+# send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
-# s3_affix_tenant=false
-#### (BoolOpt) whether to affix the tenant id to the access key when
-#### downloading from s3
+# Use single default gateway. Only first nic of vm will get
+# default gateway from dhcp server (boolean value)
+#use_single_default_gateway=false
+# the ip for the metadata api server (string value)
+#metadata_host=$my_ip
-######## defined in nova.ipv6.api ########
+# the port for the metadata api server (integer value)
+#metadata_port=8775
-# ipv6_backend=rfc2462
-#### (StrOpt) Backend to use for IPv6 generation
+#
+# Options defined in nova.network.manager
+#
-######## defined in nova.network.ldapdns ########
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
-# ldap_dns_url=ldap://ldap.example.com:389
-#### (StrOpt) URL for ldap server which will store dns entries
+# Dns for simple network (string value)
+#flat_network_dns=8.8.4.4
-# ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
-#### (StrOpt) user for ldap DNS
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
-# ldap_dns_password=password
-#### (StrOpt) password for ldap DNS
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
-# ldap_dns_soa_hostmaster=hostmaster@example.org
-#### (StrOpt) Hostmaster for ldap dns driver Statement of Authority
+# First VLAN for private networks (integer value)
+#vlan_start=100
-# ldap_dns_servers=dns.example.org
-#### (MultiStrOpt) DNS Servers for ldap dns driver
+# vlans will bridge into this interface if set (string value)
+#vlan_interface=<None>
-# ldap_dns_base_dn=ou=hosts,dc=example,dc=org
-#### (StrOpt) Base DN for DNS entries in ldap
+# Number of networks to support (integer value)
+#num_networks=1
-# ldap_dns_soa_refresh=1800
-#### (StrOpt) Refresh interval (in seconds) for ldap dns driver Statement
-#### of Authority
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
-# ldap_dns_soa_retry=3600
-#### (StrOpt) Retry interval (in seconds) for ldap dns driver Statement of
-#### Authority
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
-# ldap_dns_soa_expiry=86400
-#### (StrOpt) Expiry interval (in seconds) for ldap dns driver Statement
-#### of Authority
+# Default value for multi_host in networks (boolean value)
+#multi_host=false
-# ldap_dns_soa_minimum=7200
-#### (StrOpt) Minimum interval (in seconds) for ldap dns driver Statement
-#### of Authority
+# Number of addresses in each private subnet (integer value)
+#network_size=256
+# Floating IP address block (string value)
+#floating_range=4.4.4.0/24
-######## defined in nova.network.linux_net ########
+# Default pool for floating ips (string value)
+#default_floating_pool=nova
-# dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
-#### (StrOpt) location of flagfile for dhcpbridge
+# Fixed IP address block (string value)
+#fixed_range=10.0.0.0/8
-# networks_path=$state_path/networks
-#### (StrOpt) Location to keep network config files
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
-# public_interface=eth0
-#### (StrOpt) Interface for public IP addresses
+# Default IPv4 gateway (string value)
+#gateway=<None>
-# network_device_mtu=<None>
-#### (StrOpt) MTU setting for vlan
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
-# dhcpbridge=$bindir/nova-dhcpbridge
-#### (StrOpt) location of nova-dhcpbridge
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
-# routing_source_ip=$my_ip
-#### (StrOpt) Public IP of network host
+# Seconds after which a deallocated ip is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
-# dhcp_lease_time=120
-#### (IntOpt) Lifetime of a DHCP lease in seconds
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
-# dns_server=<None>
-#### (StrOpt) if set, uses specific dns server for dnsmasq
+# Autoassigning floating ip to VM (boolean value)
+#auto_assign_floating_ip=false
-# dmz_cidr=
-#### (ListOpt) A list of dmz range that should be accepted
+# Network host to use for ip allocation in flat modes (string
+# value)
+#network_host=nova
-# dnsmasq_config_file=
-#### (StrOpt) Override the default dnsmasq settings with this file
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
-# linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
-#### (StrOpt) Driver used to create ethernet devices.
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
-# linuxnet_ovs_integration_bridge=br-int
-#### (StrOpt) Name of Open vSwitch bridge used with linuxnet
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
-# send_arp_for_ha=false
-#### (BoolOpt) send gratuitous ARPs for HA setup
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=false
-# send_arp_for_ha_count=3
-#### (IntOpt) send this many gratuitous ARPs for HA setup
+# If True in multi_host mode, all compute hosts share the same
+# dhcp address. (boolean value)
+#share_dhcp_address=false
-# use_single_default_gateway=false
-#### (BoolOpt) Use single default gateway. Only first nic of vm will get
-#### default gateway from dhcp server
+# If True, when a DNS entry must be updated, it sends a fanout
+# cast to all network hosts to update their DNS entries in
+# multi host mode (boolean value)
+#update_dns_entries=false
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
-######## defined in nova.network.manager ########
+# domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
-# flat_network_bridge=<None>
-#### (StrOpt) Bridge for simple network instances
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
-# flat_network_dns=8.8.4.4
-#### (StrOpt) Dns for simple network
+# full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-# flat_injected=false
-#### (BoolOpt) Whether to attempt to inject network setup into guest
+# full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
-# flat_interface=<None>
-#### (StrOpt) FlatDhcp will bridge into this interface if set
+# full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-# vlan_start=100
-#### (IntOpt) First VLAN for private networks
-# vlan_interface=<None>
-#### (StrOpt) vlans will bridge into this interface if set
+#
+# Options defined in nova.network.quantumv2.api
+#
-# num_networks=1
-#### (IntOpt) Number of networks to support
+# URL for connecting to quantum (string value)
+#quantum_url=http://127.0.0.1:9696
-# vpn_ip=$my_ip
-#### (StrOpt) Public IP for the cloudpipe VPN servers
+# timeout value for connecting to quantum in seconds (integer
+# value)
+#quantum_url_timeout=30
-# vpn_start=1000
-#### (IntOpt) First Vpn port for private networks
+# username for connecting to quantum in admin context (string
+# value)
+#quantum_admin_username=<None>
-# multi_host=false
-#### (BoolOpt) Default value for multi_host in networks
+# password for connecting to quantum in admin context (string
+# value)
+#quantum_admin_password=<None>
-# network_size=256
-#### (IntOpt) Number of addresses in each private subnet
+# tenant name for connecting to quantum in admin context
+# (string value)
+#quantum_admin_tenant_name=<None>
-# floating_range=4.4.4.0/24
-#### (StrOpt) Floating IP address block
+# auth url for connecting to quantum in admin context (string
+# value)
+#quantum_admin_auth_url=http://localhost:5000/v2.0
-# default_floating_pool=nova
-#### (StrOpt) Default pool for floating ips
+# auth strategy for connecting to quantum in admin context
+# (string value)
+#quantum_auth_strategy=keystone
-# fixed_range=10.0.0.0/8
-#### (StrOpt) Fixed IP address block
-# fixed_range_v6=fd00::/48
-#### (StrOpt) Fixed IPv6 address block
+#
+# Options defined in nova.network.rpcapi
+#
-# gateway=<None>
-#### (StrOpt) Default IPv4 gateway
+# the topic network nodes listen on (string value)
+#network_topic=network
-# gateway_v6=<None>
-#### (StrOpt) Default IPv6 gateway
-# cnt_vpn_clients=0
-#### (IntOpt) Number of addresses reserved for vpn clients
+#
+# Options defined in nova.objectstore.s3server
+#
-# fixed_ip_disassociate_timeout=600
-#### (IntOpt) Seconds after which a deallocated ip is disassociated
+# path to s3 buckets (string value)
+#buckets_path=$state_path/buckets
-# create_unique_mac_address_attempts=5
-#### (IntOpt) Number of attempts to create unique mac address
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
-# auto_assign_floating_ip=false
-#### (BoolOpt) Autoassigning floating ip to VM
+# port for s3 api to listen (integer value)
+#s3_listen_port=3333
-# network_host=nova
-#### (StrOpt) Network host to use for ip allocation in flat modes
-# fake_call=false
-#### (BoolOpt) If True, skip using the queue and make local calls
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
-# force_dhcp_release=false
-#### (BoolOpt) If True, send a dhcp release on instance termination
+# port for eventlet backdoor to listen (integer value)
+#backdoor_port=<None>
-# dhcp_domain=novalocal
-#### (StrOpt) domain to use for building the hostnames
-# l3_lib=nova.network.l3.LinuxNetL3
-#### (StrOpt) Indicates underlying L3 management library
+#
+# Options defined in nova.openstack.common.lockutils
+#
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
-######## defined in nova.network.quantumv2.api ########
+# Directory to use for lock files (string value)
+#lock_path=/usr/lib/python/site-packages/nova/openstack
-# quantum_url=http://127.0.0.1:9696
-#### (StrOpt) URL for connecting to quantum
-# quantum_url_timeout=30
-#### (IntOpt) timeout value for connecting to quantum in seconds
+#
+# Options defined in nova.openstack.common.log
+#
-# quantum_admin_username=<None>
-#### (StrOpt) username for connecting to quantum in admin context
+# Log output to standard error (boolean value)
+#use_stderr=true
-# quantum_admin_password=<None>
-#### (StrOpt) password for connecting to quantum in admin context
+# Default file mode used when creating log files (string
+# value)
+#logfile_mode=0644
-# quantum_admin_tenant_name=<None>
-#### (StrOpt) tenant name for connecting to quantum in admin context
+# format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
-# quantum_admin_auth_url=http://localhost:5000/v2.0
-#### (StrOpt) auth url for connecting to quantum in admin context
+# format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-# quantum_auth_strategy=keystone
-#### (StrOpt) auth strategy for connecting to quantum in admin context
+# data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+# prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
-######## defined in nova.objectstore.s3server ########
+# list of logger=LEVEL pairs (list value)
+#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
-# buckets_path=$state_path/buckets
-#### (StrOpt) path to s3 buckets
+# publish error events (boolean value)
+#publish_errors=false
-# s3_listen=0.0.0.0
-#### (StrOpt) IP address for S3 API to listen
+# make deprecations fatal (boolean value)
+#fatal_deprecations=false
-# s3_listen_port=3333
-#### (IntOpt) port for s3 api to listen
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
-######## defined in nova.openstack.common.log ########
-# logdir=<None>
-#### (StrOpt) Log output to a per-service log file in named directory
+#
+# Options defined in nova.openstack.common.notifier.api
+#
-# logfile=<None>
-#### (StrOpt) Log output to a named file
+# Driver or drivers to handle sending notifications (multi
+# valued)
-# use_stderr=true
-#### (BoolOpt) Log output to standard error
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
-# logfile_mode=0644
-#### (StrOpt) Default file mode used when creating log files
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=$host
-# logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
-#### (StrOpt) format string to use for log messages with context
-# logging_default_format_string=%(asctime)s %(levelname)s %(name)s [-] %(instance)s%(message)s
-#### (StrOpt) format string to use for log messages without context
+#
+# Options defined in nova.openstack.common.notifier.rpc_notifier
+#
-# logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d
-#### (StrOpt) data to append to log format when level is DEBUG
+# AMQP topic used for openstack notifications (list value)
+#notification_topics=notifications
-# logging_exception_prefix=%(asctime)s TRACE %(name)s %(instance)s
-#### (StrOpt) prefix each line of exception output with this format
-# default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
-#### (ListOpt) list of logger=LEVEL pairs
+#
+# Options defined in nova.openstack.common.rpc
+#
-# publish_errors=false
-#### (BoolOpt) publish error events
+# The messaging module to use, defaults to kombu. (string
+# value)
+#rpc_backend=nova.openstack.common.rpc.impl_kombu
-# instance_format="[instance: %(uuid)s] "
-#### (StrOpt) If an instance is passed with the log message, format it
-#### like this
+# Size of RPC thread pool (integer value)
+#rpc_thread_pool_size=64
-# instance_uuid_format="[instance: %(uuid)s] "
-#### (StrOpt) If an instance UUID is passed with the log message, format
-#### it like this
+# Size of RPC connection pool (integer value)
+#rpc_conn_pool_size=30
+# Seconds to wait for a response from call or multicall
+# (integer value)
+#rpc_response_timeout=60
-######## defined in nova.openstack.common.notifier.api ########
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
-#### (MultiStrOpt) Driver or drivers to handle sending notifications
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception,cinder.exception,exceptions
-# default_notification_level=INFO
-#### (StrOpt) Default notification level for outgoing notifications
+# If passed, use a fake RabbitMQ provider (boolean value)
+#fake_rabbit=false
-# default_publisher_id=$host
-#### (StrOpt) Default publisher_id for outgoing notifications
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# (string value)
+#control_exchange=openstack
-######## defined in nova.openstack.common.notifier.rabbit_notifier ########
+#
+# Options defined in nova.openstack.common.rpc.impl_kombu
+#
-# notification_topics=notifications
-#### (ListOpt) AMQP topic used for openstack notifications
+# SSL version to use (valid only if SSL enabled) (string
+# value)
+#kombu_ssl_version=
+# SSL key file (valid only if SSL enabled) (string value)
+#kombu_ssl_keyfile=
-######## defined in nova.openstack.common.rpc ########
+# SSL cert file (valid only if SSL enabled) (string value)
+#kombu_ssl_certfile=
-# rpc_backend=nova.openstack.common.rpc.impl_kombu
-#### (StrOpt) The messaging module to use, defaults to kombu.
+# SSL certification authority file (valid only if SSL enabled)
+# (string value)
+#kombu_ssl_ca_certs=
-# rpc_thread_pool_size=64
-#### (IntOpt) Size of RPC thread pool
+# The RabbitMQ broker address where a single node is used
+# (string value)
+#rabbit_host=localhost
-# rpc_conn_pool_size=30
-#### (IntOpt) Size of RPC connection pool
+# The RabbitMQ broker port where a single node is used
+# (integer value)
+#rabbit_port=5672
-# rpc_response_timeout=60
-#### (IntOpt) Seconds to wait for a response from call or multicall
+# RabbitMQ HA cluster host:port pairs (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
-# rpc_cast_timeout=30
-#### (IntOpt) Seconds to wait before a cast expires (TTL). Only supported
-#### by impl_zmq.
+# connect over SSL for RabbitMQ (boolean value)
+#rabbit_use_ssl=false
-# allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception
-#### (ListOpt) Modules of exceptions that are permitted to be recreatedupon
-#### receiving exception data from an rpc call.
+# the RabbitMQ userid (string value)
+#rabbit_userid=guest
-# control_exchange=nova
-#### (StrOpt) AMQP exchange to connect to if using RabbitMQ or Qpid
+# the RabbitMQ password (string value)
+#rabbit_password=guest
-# fake_rabbit=false
-#### (BoolOpt) If passed, use a fake RabbitMQ provider
+# the RabbitMQ virtual host (string value)
+#rabbit_virtual_host=/
+# how frequently to retry connecting with RabbitMQ (integer
+# value)
+#rabbit_retry_interval=1
-######## defined in nova.openstack.common.rpc.impl_kombu ########
+# how long to backoff for between retries when connecting to
+# RabbitMQ (integer value)
+#rabbit_retry_backoff=2
-# kombu_ssl_version=
-#### (StrOpt) SSL version to use (valid only if SSL enabled)
+# maximum retries with trying to connect to RabbitMQ (the
+# default of 0 implies an infinite retry count) (integer
+# value)
+#rabbit_max_retries=0
-# kombu_ssl_keyfile=
-#### (StrOpt) SSL key file (valid only if SSL enabled)
+# use durable queues in RabbitMQ (boolean value)
+#rabbit_durable_queues=false
-# kombu_ssl_certfile=
-#### (StrOpt) SSL cert file (valid only if SSL enabled)
+# use H/A queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean
+# value)
+#rabbit_ha_queues=false
-# kombu_ssl_ca_certs=
-#### (StrOpt) SSL certification authority file (valid only if SSL enabled)
-# rabbit_host=localhost
-#### (StrOpt) the RabbitMQ host
+#
+# Options defined in nova.openstack.common.rpc.impl_qpid
+#
-# rabbit_port=5672
-#### (IntOpt) the RabbitMQ port
+# Qpid broker hostname (string value)
+#qpid_hostname=localhost
-# rabbit_use_ssl=false
-#### (BoolOpt) connect over SSL for RabbitMQ
+# Qpid broker port (string value)
+#qpid_port=5672
-# rabbit_userid=guest
-#### (StrOpt) the RabbitMQ userid
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
-# rabbit_password=guest
-#### (StrOpt) the RabbitMQ password
+# Username for qpid connection (string value)
+#qpid_username=
-# rabbit_virtual_host=/
-#### (StrOpt) the RabbitMQ virtual host
+# Password for qpid connection (string value)
+#qpid_password=
-# rabbit_retry_interval=1
-#### (IntOpt) how frequently to retry connecting with RabbitMQ
+# Space separated list of SASL mechanisms to use for auth
+# (string value)
+#qpid_sasl_mechanisms=
-# rabbit_retry_backoff=2
-#### (IntOpt) how long to backoff for between retries when connecting to
-#### RabbitMQ
+# Seconds between connection keepalive heartbeats (integer
+# value)
+#qpid_heartbeat=60
-# rabbit_max_retries=0
-#### (IntOpt) maximum retries with trying to connect to RabbitMQ (the
-#### default of 0 implies an infinite retry count)
+# Transport to use, either 'tcp' or 'ssl' (string value)
+#qpid_protocol=tcp
-# rabbit_durable_queues=false
-#### (BoolOpt) use durable queues in RabbitMQ
+# Disable Nagle algorithm (boolean value)
+#qpid_tcp_nodelay=true
-######## defined in nova.openstack.common.rpc.impl_qpid ########
+#
+# Options defined in nova.openstack.common.rpc.impl_zmq
+#
-# qpid_hostname=localhost
-#### (StrOpt) Qpid broker hostname
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
-# qpid_port=5672
-#### (StrOpt) Qpid broker port
+# MatchMaker driver (string value)
+#rpc_zmq_matchmaker=nova.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-# qpid_username=
-#### (StrOpt) Username for qpid connection
+# ZeroMQ receiver listening port (integer value)
+#rpc_zmq_port=9501
-# qpid_password=
-#### (StrOpt) Password for qpid connection
+# Number of ZeroMQ contexts, defaults to 1 (integer value)
+#rpc_zmq_contexts=1
-# qpid_sasl_mechanisms=
-#### (StrOpt) Space separated list of SASL mechanisms to use for auth
+# Directory for holding IPC sockets (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
-# qpid_reconnect=true
-#### (BoolOpt) Automatically reconnect
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=nova
-# qpid_reconnect_timeout=0
-#### (IntOpt) Reconnection timeout in seconds
-# qpid_reconnect_limit=0
-#### (IntOpt) Max reconnections before giving up
+#
+# Options defined in nova.openstack.common.rpc.matchmaker
+#
-# qpid_reconnect_interval_min=0
-#### (IntOpt) Minimum seconds between reconnection attempts
+# Matchmaker ring file (JSON) (string value)
+#matchmaker_ringfile=/etc/nova/matchmaker_ring.json
-# qpid_reconnect_interval_max=0
-#### (IntOpt) Maximum seconds between reconnection attempts
-# qpid_reconnect_interval=0
-#### (IntOpt) Equivalent to setting max and min to the same value
+#
+# Options defined in nova.scheduler.driver
+#
-# qpid_heartbeat=60
-#### (IntOpt) Seconds between connection keepalive heartbeats
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
-# qpid_protocol=tcp
-#### (StrOpt) Transport to use, either 'tcp' or 'ssl'
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
-# qpid_tcp_nodelay=true
-#### (BoolOpt) Disable Nagle algorithm
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
-######## defined in nova.openstack.common.rpc.impl_zmq ########
+# Virtual CPU to Physical CPU allocation ratio (floating point
+# value)
+#cpu_allocation_ratio=16.0
-# rpc_zmq_bind_address=*
-#### (StrOpt) ZeroMQ bind address. Should be a wildcard (*), an ethernet
-#### interface, or IP. The "host" option should point or resolve
-#### to this address.
-# rpc_zmq_matchmaker=nova.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-#### (StrOpt) MatchMaker driver
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
-# rpc_zmq_port=9501
-#### (IntOpt) ZeroMQ receiver listening port
+# virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
-# rpc_zmq_contexts=1
-#### (IntOpt) Number of ZeroMQ contexts, defaults to 1
-# rpc_zmq_ipc_dir=/var/run/openstack
-#### (StrOpt) Directory for holding IPC sockets
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
-# rpc_zmq_host=nova
-#### (StrOpt) Name of this node. Must be a valid hostname, FQDN, or IP
-#### address. Must match "host" option, if running Nova.
+# Ignore hosts that have too many
+# builds/resizes/snaps/migrations (integer value)
+#max_io_ops_per_host=8
-######## defined in nova.openstack.common.rpc.matchmaker ########
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
-# matchmaker_ringfile=/etc/nova/matchmaker_ring.json
-#### (StrOpt) Matchmaker ring file (JSON)
+# Images to run on isolated host (list value)
+#isolated_images=
+# Host reserved for specific images (list value)
+#isolated_hosts=
-######## defined in nova.scheduler.driver ########
-# scheduler_host_manager=nova.scheduler.host_manager.HostManager
-#### (StrOpt) The scheduler host manager class to use
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
-# scheduler_max_attempts=3
-#### (IntOpt) Maximum number of attempts to schedule an instance
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
-######## defined in nova.scheduler.filters.core_filter ########
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
-# cpu_allocation_ratio=16.0
-#### (FloatOpt) Virtual CPU to Physical CPU allocation ratio
+# virtual ram to physical ram allocation ratio (floating point
+# value)
+#ram_allocation_ratio=1.5
-######## defined in nova.scheduler.filters.disk_filter ########
+#
+# Options defined in nova.scheduler.host_manager
+#
-# disk_allocation_ratio=1.0
-#### (FloatOpt) virtual disk to physical disk allocation ratio
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.standard_filters" maps to all
+# filters included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
-######## defined in nova.scheduler.filters.ram_filter ########
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
-# ram_allocation_ratio=1.5
-#### (FloatOpt) virtual ram to physical ram allocation ratio
+#
+# Options defined in nova.scheduler.manager
+#
-######## defined in nova.scheduler.filters.trusted_filter ########
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-# server=<None>
-#### (StrOpt) attestation server http
-# server_ca_file=<None>
-#### (StrOpt) attestation server Cert file for Identity verification
+#
+# Options defined in nova.scheduler.multi
+#
-# port=8443
-#### (StrOpt) attestation server port
+# Driver to use for scheduling compute calls (string value)
+#compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-# api_url=/OpenAttestationWebServices/V1.0
-#### (StrOpt) attestation web API URL
+# Default driver to use for scheduling calls (string value)
+#default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-# auth_blob=<None>
-#### (StrOpt) attestation authorization blob - must change
+#
+# Options defined in nova.scheduler.rpcapi
+#
-######## defined in nova.scheduler.host_manager ########
+# the topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
-# scheduler_available_filters=nova.scheduler.filters.standard_filters
-#### (MultiStrOpt) Filter classes available to the scheduler which may be
-#### specified more than once. An entry of
-#### "nova.scheduler.filters.standard_filters" maps to all
-#### filters included with nova.
-# scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
-#### (ListOpt) Which filter class names to use for filtering hosts when not
-#### specified in the request.
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
-######## defined in nova.scheduler.least_cost ########
-# least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn
-#### (ListOpt) Which cost functions the LeastCostScheduler should use
+#
+# Options defined in nova.scheduler.weights.least_cost
+#
-# noop_cost_fn_weight=1.0
-#### (FloatOpt) How much weight to give the noop cost function
+# Which cost functions the LeastCostScheduler should use (list
+# value)
+#least_cost_functions=<None>
-# compute_fill_first_cost_fn_weight=-1.0
-#### (FloatOpt) How much weight to give the fill-first cost function. A
-#### negative value will reverse behavior: e.g. spread-first
+# How much weight to give the noop cost function (floating
+# point value)
+#noop_cost_fn_weight=1.0
+# How much weight to give the fill-first cost function. A
+# negative value will reverse behavior: e.g. spread-first
+# (floating point value)
+#compute_fill_first_cost_fn_weight=<None>
-######## defined in nova.scheduler.manager ########
-# scheduler_driver=nova.scheduler.multi.MultiScheduler
-#### (StrOpt) Default driver to use for the scheduler
+#
+# Options defined in nova.scheduler.weights.ram
+#
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
-######## defined in nova.scheduler.multi ########
-# compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-#### (StrOpt) Driver to use for scheduling compute calls
+#
+# Options defined in nova.servicegroup.api
+#
-# volume_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-#### (StrOpt) Driver to use for scheduling volume calls
+# The driver for servicegroup service. (string value)
+#servicegroup_driver=db
-# default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-#### (StrOpt) Default driver to use for scheduling calls
+#
+# Options defined in nova.virt.baremetal.db.api
+#
-######## defined in nova.scheduler.scheduler_options ########
+# The backend to use for db (string value)
+#baremetal_db_backend=sqlalchemy
-# scheduler_json_config_location=
-#### (StrOpt) Absolute path to scheduler configuration JSON file.
+#
+# Options defined in nova.virt.configdrive
+#
-######## defined in nova.scheduler.simple ########
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
-# max_gigabytes=10000
-#### (IntOpt) maximum number of volume gigabytes to allow per host
+# Where to put temporary files associated with config drive
+# creation (string value)
+#config_drive_tempdir=<None>
+# Set to force injection to take place on a config drive (if
+# set, valid options are: always) (string value)
+#force_config_drive=<None>
-######## defined in nova.virt.baremetal.driver ########
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
-# baremetal_type=baremetal
-#### (StrOpt) baremetal domain type
+#
+# Options defined in nova.virt.disk.api
+#
-######## defined in nova.virt.baremetal.nodes ########
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
-# baremetal_driver=tilera
-#### (StrOpt) Bare-metal driver runs on
+# mkfs commands for ephemeral device. The format is
+# <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=default=mkfs.ext3 -L %(fs_label)s -F %(target)s
+#virt_mkfs=linux=mkfs.ext3 -L %(fs_label)s -F %(target)s
+#virt_mkfs=windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
-######## defined in nova.virt.baremetal.tilera ########
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
-# tile_monitor=/usr/local/TileraMDE/bin/tile-monitor
-#### (StrOpt) Tilera command line program for Bare-metal driver
+# time to wait for a NBD device coming up (integer value)
+#timeout_nbd=10
-######## defined in nova.virt.configdrive ########
+#
+# Options defined in nova.virt.driver
+#
-# config_drive_format=iso9660
-#### (StrOpt) Config drive format. One of iso9660 (default) or vfat
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMWareESXDriver (string value)
+#compute_driver=<None>
-# config_drive_tempdir=<None>
-#### (StrOpt) Where to put temporary files associated with config drive
-#### creation
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
+# Whether to use cow images (boolean value)
+#use_cow_images=true
-######## defined in nova.virt.disk.api ########
-# injected_network_template=$pybasedir/nova/virt/interfaces.template
-#### (StrOpt) Template file for injected network
+#
+# Options defined in nova.virt.firewall
+#
-# img_handlers=loop,nbd,guestfs
-#### (ListOpt) Order of methods used to mount disk images
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+#firewall_driver=<None>
-# virt_mkfs=default=mkfs.ext3 -L %(fs_label)s -F %(target)s
-# virt_mkfs=linux=mkfs.ext3 -L %(fs_label)s -F %(target)s
-# virt_mkfs=windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
-#### (MultiStrOpt) mkfs commands for ephemeral device. The format is
-#### <os_type>=<mkfs command>
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
-######## defined in nova.virt.disk.nbd ########
+#
+# Options defined in nova.virt.hyperv.vmops
+#
-# timeout_nbd=10
-#### (IntOpt) time to wait for a NBD device coming up
+# Default vSwitch Name, if none provided first external is
+# used (string value)
+#vswitch_name=<None>
-# max_nbd_devices=16
-#### (IntOpt) maximum number of possible nbd devices
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
-######## defined in nova.virt.firewall ########
+# qemu-img is used to convert between different image types
+# (string value)
+#qemu_img_cmd=qemu-img.exe
-# firewall_driver=<None>
-#### (StrOpt) Firewall driver (defaults to hypervisor specific iptables
-#### driver)
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
-# allow_same_net_traffic=true
-#### (BoolOpt) Whether to allow network traffic from same network
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
-######## defined in nova.virt.hyperv.vmops ########
+# The number of times we retry on attaching volume (integer
+# value)
+#hyperv_attaching_volume_retry_count=10
-# vswitch_name=<None>
-#### (StrOpt) Default vSwitch Name, if none provided first external is
-#### used
+# The seconds to wait between a volume attachment attempt
+# (integer value)
+#hyperv_wait_between_attach_retry=5
-# limit_cpu_features=false
-#### (BoolOpt) required for live migration among hosts with different CPU
-#### features
+# Force volumeutils v1 (boolean value)
+#force_volumeutils_v1=false
-######## defined in nova.virt.hyperv.volumeops ########
+#
+# Options defined in nova.virt.images
+#
-# hyperv_attaching_volume_retry_count=10
-#### (IntOpt) The number of times we retry on attaching volume
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
-# hyperv_wait_between_attach_retry=5
-#### (IntOpt) The seconds to wait between an volume attachment attempt
+#
+# Options defined in nova.virt.libvirt.driver
+#
-######## defined in nova.virt.images ########
+# Rescue ami image (string value)
+#rescue_image_id=<None>
-# force_raw_images=true
-#### (BoolOpt) Force backing images to raw format
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
-######## defined in nova.virt.libvirt.driver ########
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen) (string value)
+#libvirt_type=kvm
-# rescue_image_id=<None>
-#### (StrOpt) Rescue ami image
+# Override the default libvirt URI (which is dependent on
+# libvirt_type) (string value)
+#libvirt_uri=
-# rescue_kernel_id=<None>
-#### (StrOpt) Rescue aki image
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+#libvirt_inject_password=false
-# rescue_ramdisk_id=<None>
-#### (StrOpt) Rescue ari image
+# Inject the ssh public key at boot time (boolean value)
+#libvirt_inject_key=true
-# libvirt_type=kvm
-#### (StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
-#### xen)
+# The partition to inject to : -1 => inspect (libguestfs
+# only), 0 => not partitioned, >0 => partition number (integer
+# value)
+#libvirt_inject_partition=1
-# libvirt_uri=
-#### (StrOpt) Override the default libvirt URI (which is dependent on
-#### libvirt_type)
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
-# libvirt_inject_password=false
-#### (BoolOpt) Inject the admin password at boot time, without an agent.
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
-# libvirt_inject_key=true
-#### (BoolOpt) Inject the ssh public key at boot time
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
-# libvirt_inject_partition=1
-#### (IntOpt) The partition to inject to : -1 => inspect (libguestfs
-#### only), 0 => not partitioned, >0 => partition number
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC
-# use_usb_tablet=true
-#### (BoolOpt) Sync virtual and real mouse cursors in Windows VMs
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
-# live_migration_uri=qemu+tcp://%s/system
-#### (StrOpt) Migration target URI (any included "%s" is replaced with the
-#### migration target hostname)
+# Snapshot image format (valid options are : raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
-# live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER
-#### (StrOpt) Migration flags to be set for live migration
+# The libvirt VIF driver to configure the VIFs. (string value)
+#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtBridgeDriver
-# block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC
-#### (StrOpt) Migration flags to be set for block migration
+# Libvirt handlers for remote volumes. (list value)
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume_nfs.NfsVolumeDriver
-# live_migration_bandwidth=0
-#### (IntOpt) Maximum bandwidth to be used during migration, in Mbps
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on libvirt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#libvirt_disk_prefix=<None>
-# snapshot_image_format=<None>
-#### (StrOpt) Snapshot image format (valid options are : raw, qcow2, vmdk,
-#### vdi). Defaults to same as source image
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#libvirt_wait_soft_reboot_seconds=120
-# libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtBridgeDriver
-#### (StrOpt) The libvirt VIF driver to configure the VIFs.
+# Use a separated OS thread pool to realize non-blocking
+# libvirt calls (boolean value)
+#libvirt_nonblocking=true
-# libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver
-#### (ListOpt) Libvirt handlers for remote volumes.
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If libvirt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#libvirt_cpu_mode=<None>
-# libvirt_disk_prefix=<None>
-#### (StrOpt) Override the default disk prefix for the devices attached to
-#### a server, which is dependent on libvirt_type. (valid options
-#### are: sd, xvd, uvd, vd)
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"
+# (string value)
+#libvirt_cpu_model=<None>
-# libvirt_wait_soft_reboot_seconds=120
-#### (IntOpt) Number of seconds to wait for instance to shut down after
-#### soft reboot request is made. We fall back to hard reboot if
-#### instance does not shutdown within this window.
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#libvirt_snapshots_directory=$instances_path/snapshots
-# libvirt_nonblocking=true
-#### (BoolOpt) Use a separated OS thread pool to realize non-blocking
-#### libvirt calls
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
-# force_config_drive=<None>
-#### (StrOpt) Set to force injection to take place on a config drive (if
-#### set, valid options are: always)
-# libvirt_cpu_mode=<None>
-#### (StrOpt) Set to "host-model" to clone the host CPU feature flags; to
-#### "host-passthrough" to use the host CPU model exactly; to
-#### "custom" to use a named CPU model; to "none" to not set any
-#### CPU model. If libvirt_type="kvm|qemu", it will default to
-#### "host-model", otherwise it will default to "none"
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
-# libvirt_cpu_model=<None>
-#### (StrOpt) Set to a named libvirt CPU model (see names listed in
-#### /usr/share/libvirt/cpu_map.xml). Only has effect if
-#### libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# default. If default is specified, then use_cow_images flag
+# is used instead of this one. (string value)
+#libvirt_images_type=default
-# libvirt_snapshots_directory=$instances_path/snapshots
-#### (StrOpt) Location where libvirt driver will store snapshots before
-#### uploading them to image service
+# LVM Volume Group that is used for VM images, when you
+# specify libvirt_images_type=lvm. (string value)
+#libvirt_images_volume_group=<None>
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#libvirt_sparse_logical_volumes=false
-######## defined in nova.virt.libvirt.imagebackend ########
-# libvirt_images_type=default
-#### (StrOpt) VM Images format. Acceptable values are: raw, qcow2, lvm,
-#### default. If default is specified, then use_cow_images flag
-#### is used instead of this one.
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
-# libvirt_images_volume_group=<None>
-#### (StrOpt) LVM Volume Group that is used for VM images, when you
-#### specify libvirt_images_type=lvm.
+# Where cached images are stored under $instances_path. This is
+# NOT the full path - just a folder name. For per-compute-host
+# cached images, set to _base_$my_ip (string value)
+#base_dir_name=_base
-# libvirt_sparse_logical_volumes=false
-#### (BoolOpt) Create sparse logical volumes (with virtualsize) if this
-#### flag is set to True.
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$base_dir_name/%(image)s.info
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
-######## defined in nova.virt.libvirt.imagecache ########
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
-# remove_unused_base_images=true
-#### (BoolOpt) Should unused base images be removed?
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
-# remove_unused_resized_minimum_age_seconds=3600
-#### (IntOpt) Unused resized base images younger than this will not be
-#### removed
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
-# remove_unused_original_minimum_age_seconds=86400
-#### (IntOpt) Unused unresized base images younger than this will not be
-#### removed
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
-# checksum_base_images=false
-#### (BoolOpt) Write a checksum for files in _base to disk
+#
+# Options defined in nova.virt.libvirt.vif
+#
-######## defined in nova.virt.libvirt.utils ########
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#libvirt_ovs_bridge=br-int
-# image_info_filename_pattern=$instances_path/$base_dir_name/%(image)s.info
-#### (StrOpt) Allows image information files to be stored in non-standard
-#### locations
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#libvirt_use_virtio_for_bridges=true
-######## defined in nova.virt.libvirt.vif ########
+#
+# Options defined in nova.virt.libvirt.volume
+#
-# libvirt_ovs_bridge=br-int
-#### (StrOpt) Name of Integration Bridge used by Open vSwitch
+# number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=3
-# libvirt_use_virtio_for_bridges=false
-#### (BoolOpt) Use virtio for bridge interfaces
+# the RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+# the libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
-######## defined in nova.virt.libvirt.volume_nfs ########
-# nfs_mount_point_base=$state_path/mnt
-#### (StrOpt) Base dir where nfs expected to be mounted on compute
+#
+# Options defined in nova.virt.libvirt.volume_nfs
+#
+# Base dir where nfs expected to be mounted on compute (string
+# value)
+#nfs_mount_point_base=$state_path/mnt
-######## defined in nova.virt.powervm.driver ########
-# powervm_mgr_type=ivm
-#### (StrOpt) PowerVM manager type (ivm, hmc)
+#
+# Options defined in nova.virt.powervm.driver
+#
-# powervm_mgr=<None>
-#### (StrOpt) PowerVM manager host or ip
+# PowerVM manager type (ivm, hmc) (string value)
+#powervm_mgr_type=ivm
-# powervm_mgr_user=<None>
-#### (StrOpt) PowerVM manager user name
+# PowerVM manager host or ip (string value)
+#powervm_mgr=<None>
-# powervm_mgr_passwd=<None>
-#### (StrOpt) PowerVM manager user password
+# PowerVM manager user name (string value)
+#powervm_mgr_user=<None>
-# powervm_img_remote_path=<None>
-#### (StrOpt) PowerVM image remote path
+# PowerVM manager user password (string value)
+#powervm_mgr_passwd=<None>
-# powervm_img_local_path=<None>
-#### (StrOpt) Local directory to download glance images to
+# PowerVM image remote path (string value)
+#powervm_img_remote_path=<None>
+# Local directory to download glance images to (string value)
+#powervm_img_local_path=<None>
-######## defined in nova.virt.vmwareapi.driver ########
-# vmwareapi_host_ip=<None>
-#### (StrOpt) URL for connection to VMWare ESX host.Required if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
-# vmwareapi_host_username=<None>
-#### (StrOpt) Username for connection to VMWare ESX host. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+# URL for connection to VMWare ESX host. Required if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_ip=<None>
-# vmwareapi_host_password=<None>
-#### (StrOpt) Password for connection to VMWare ESX host. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+# Username for connection to VMWare ESX host. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_username=<None>
-# vmwareapi_task_poll_interval=5.0
-#### (FloatOpt) The interval used for polling of remote tasks. Used only if
-#### compute_driver is vmwareapi.VMWareESXDriver.
+# Password for connection to VMWare ESX host. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+#vmwareapi_host_password=<None>
-# vmwareapi_api_retry_count=10
-#### (IntOpt) The number of times we retry on failures, e.g., socket
-#### error, etc. Used only if compute_driver is
-#### vmwareapi.VMWareESXDriver.
+# The interval used for polling of remote tasks. Used only if
+# compute_driver is vmwareapi.VMWareESXDriver. (floating point
+# value)
+#vmwareapi_task_poll_interval=5.0
-# vmwareapi_vlan_interface=vmnic0
-#### (StrOpt) Physical ethernet adapter name for vlan networking
+# The number of times we retry on failures, e.g., socket
+# error, etc. Used only if compute_driver is
+# vmwareapi.VMWareESXDriver. (integer value)
+#vmwareapi_api_retry_count=10
-######## defined in nova.virt.vmwareapi.vim ########
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
-# vmwareapi_wsdl_loc=<None>
-#### (StrOpt) VIM Service WSDL Location e.g
-#### http://<server>/vimService.wsdl. Due to a bug in vSphere ESX
-#### 4.1 default wsdl. Refer readme-vmware to setup
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vmwareapi_vlan_interface=vmnic0
-######## defined in nova.virt.vmwareapi.vmops ########
+#
+# Options defined in nova.virt.vmwareapi.vim
+#
-# vmware_vif_driver=nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver
-#### (StrOpt) The VMWare VIF driver to configure the VIFs.
+# VIM Service WSDL Location e.g.
+# http://<server>/vimService.wsdl. Due to a bug in vSphere ESX
+# 4.1 default wsdl. Refer readme-vmware to setup (string
+# value)
+#vmwareapi_wsdl_loc=<None>
-######## defined in nova.virt.xenapi.agent ########
+#
+# Options defined in nova.virt.xenapi.agent
+#
-# agent_version_timeout=300
-#### (IntOpt) number of seconds to wait for agent to be fully operational
+# number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+# number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
-######## defined in nova.virt.xenapi.driver ########
+# number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
-# xenapi_connection_url=<None>
-#### (StrOpt) URL for connection to XenServer/Xen Cloud Platform. Required
-#### if compute_driver=xenapi.XenAPIDriver
+# Specifies the path in which the xenapi guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#xenapi_agent_path=usr/sbin/xe-update-networking
-# xenapi_connection_username=root
-#### (StrOpt) Username for connection to XenServer/Xen Cloud Platform.
-#### Used only if compute_driver=xenapi.XenAPIDriver
+# Disable XenAPI agent. Reduces the amount of time it takes
+# nova to detect that a VM has started, when that VM does not
+# have the agent installed (boolean value)
+#xenapi_disable_agent=false
-# xenapi_connection_password=<None>
-#### (StrOpt) Password for connection to XenServer/Xen Cloud Platform.
-#### Used only if compute_driver=xenapi.XenAPIDriver
-# xenapi_connection_concurrent=5
-#### (IntOpt) Maximum number of concurrent XenAPI connections. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+#
+# Options defined in nova.virt.xenapi.driver
+#
-# xenapi_vhd_coalesce_poll_interval=5.0
-#### (FloatOpt) The interval used for polling of coalescing vhds. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+# URL for connection to XenServer/Xen Cloud Platform. Required
+# if compute_driver=xenapi.XenAPIDriver (string value)
+#xenapi_connection_url=<None>
-# xenapi_check_host=true
-#### (BoolOpt) Ensure compute service is running on host XenAPI connects
-#### to.
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#xenapi_connection_username=root
-# xenapi_vhd_coalesce_max_attempts=5
-#### (IntOpt) Max number of times to poll for VHD to coalesce. Used only
-#### if compute_driver=xenapi.XenAPIDriver
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#xenapi_connection_password=<None>
-# xenapi_agent_path=usr/sbin/xe-update-networking
-#### (StrOpt) Specifies the path in which the xenapi guest agent should be
-#### located. If the agent is present, network configuration is
-#### not injected into the image. Used if
-#### compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#xenapi_connection_concurrent=5
-# xenapi_sr_base_path=/var/run/sr-mount
-#### (StrOpt) Base path to the storage repository
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+#xenapi_vhd_coalesce_poll_interval=5.0
-# target_host=<None>
-#### (StrOpt) iSCSI Target Host
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+#xenapi_check_host=true
-# target_port=3260
-#### (StrOpt) iSCSI Target Port, 3260 Default
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#xenapi_vhd_coalesce_max_attempts=5
-# iqn_prefix=iqn.2010-10.org.openstack
-#### (StrOpt) IQN Prefix
+# Base path to the storage repository (string value)
+#xenapi_sr_base_path=/var/run/sr-mount
-# xenapi_remap_vbd_dev=false
-#### (BoolOpt) Used to enable the remapping of VBD dev (Works around an
-#### issue in Ubuntu Maverick)
+# iSCSI Target Host (string value)
+#target_host=<None>
-# xenapi_remap_vbd_dev_prefix=sd
-#### (StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
-#### /dev/sdb)
+# iSCSI Target Port, 3260 Default (string value)
+#target_port=3260
-# xenapi_login_timeout=10
-#### (IntOpt) Timeout in seconds for XenAPI login.
+# IQN Prefix (string value)
+#iqn_prefix=iqn.2010-10.org.openstack
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+#xenapi_remap_vbd_dev=false
-######## defined in nova.virt.xenapi.pool ########
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+#xenapi_remap_vbd_dev_prefix=sd
-# use_join_force=true
-#### (BoolOpt) To use for hosts with different CPUs
+# Timeout in seconds for XenAPI login. (integer value)
+#xenapi_login_timeout=10
-######## defined in nova.virt.xenapi.vif ########
+#
+# Options defined in nova.virt.xenapi.pool
+#
-# xenapi_ovs_integration_bridge=xapi1
-#### (StrOpt) Name of Integration Bridge used by Open vSwitch
+# To use for hosts with different CPUs (boolean value)
+#use_join_force=true
-######## defined in nova.virt.xenapi.vm_utils ########
+#
+# Options defined in nova.virt.xenapi.vif
+#
-# default_os_type=linux
-#### (StrOpt) Default OS type
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#xenapi_ovs_integration_bridge=xapi1
-# block_device_creation_timeout=10
-#### (IntOpt) Time to wait for a block device to be created
-# max_kernel_ramdisk_size=16777216
-#### (IntOpt) Maximum size in bytes of kernel or ramdisk images
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
-# sr_matching_filter=other-config:i18n-key=local-storage
-#### (StrOpt) Filter for finding the SR to be used to install guest
-#### instances on. The default value is the Local Storage in
-#### default XenServer/XCP installations. To select an SR with a
-#### different matching criteria, you could set it to other-
-#### config:my_favorite_sr=true. On the other hand, to fall back
-#### on the Default SR, as displayed by XenCenter, set this flag
-#### to: default-sr:true
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
-# xenapi_sparse_copy=true
-#### (BoolOpt) Whether to use sparse_copy for copying data on a resize down
-#### (False will use standard dd). This speeds up resizes down
-#### considerably since large runs of zeros won't have to be
-#### rsynced
+# Default OS type (string value)
+#default_os_type=linux
-# xenapi_num_vbd_unplug_retries=10
-#### (IntOpt) Maximum number of retries to unplug VBD
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
-######## defined in nova.virt.xenapi.vmops ########
+# Filter for finding the SR to be used to install guest
+# instances on. The default value is the Local Storage in
+# default XenServer/XCP installations. To select an SR with a
+# different matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=other-config:i18n-key=local-storage
-# xenapi_running_timeout=60
-#### (IntOpt) number of seconds to wait for instance to go to running
-#### state
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#xenapi_sparse_copy=true
-# xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
-#### (StrOpt) The XenAPI VIF driver using XenServer Network APIs.
+# Maximum number of retries to unplug VBD (integer value)
+#xenapi_num_vbd_unplug_retries=10
-# xenapi_generate_swap=false
-#### (BoolOpt) Whether to generate swap (False means fetching it from OVA)
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+#xenapi_torrent_images=none
+# Base URL for torrent files. (string value)
+#xenapi_torrent_base_url=<None>
-######## defined in nova.vnc ########
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+#xenapi_torrent_seed_chance=1.0
-# novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
-#### (StrOpt) location of vnc console proxy, in the form
-#### "http://127.0.0.1:6080/vnc_auto.html"
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+#xenapi_torrent_seed_duration=3600
-# xvpvncproxy_base_url=http://127.0.0.1:6081/console
-#### (StrOpt) location of nova xvp vnc console proxy, in the form
-#### "http://127.0.0.1:6081/console"
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+#xenapi_torrent_max_last_accessed=86400
-# vncserver_listen=127.0.0.1
-#### (StrOpt) Ip address on which instance vncserversshould listen
+# Beginning of port range to listen on (integer value)
+#xenapi_torrent_listen_port_start=6881
-# vncserver_proxyclient_address=127.0.0.1
-#### (StrOpt) the address to which proxy clients (like nova-xvpvncproxy)
-#### should connect
+# End of port range to listen on (integer value)
+#xenapi_torrent_listen_port_end=6891
-# vnc_enabled=true
-#### (BoolOpt) enable vnc related features
+# Number of seconds a download can remain at the same progress
+# percentage w/o being considered a stall (integer value)
+#xenapi_torrent_download_stall_cutoff=600
-# vnc_keymap=en-us
-#### (StrOpt) keymap for vnc
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+#xenapi_torrent_max_seeder_processes_per_host=1
-######## defined in nova.vnc.xvp_proxy ########
+#
+# Options defined in nova.virt.xenapi.vmops
+#
-# xvpvncproxy_port=6081
-#### (IntOpt) Port that the XCP VNC proxy should bind to
+# number of seconds to wait for instance to go to running
+# state (integer value)
+#xenapi_running_timeout=60
-# xvpvncproxy_host=0.0.0.0
-#### (StrOpt) Address that the XCP VNC proxy should bind to
+# The XenAPI VIF driver using XenServer Network APIs. (string
+# value)
+#xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
-######## defined in nova.volume.api ########
+#
+# Options defined in nova.vnc
+#
-# snapshot_same_host=true
-#### (BoolOpt) Create volume from snapshot at the host where snapshot
-#### resides
+# location of vnc console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+# location of nova xvp vnc console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
-######## defined in nova.volume.cinder ########
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
-# cinder_catalog_info=volume:cinder:publicURL
-#### (StrOpt) Info to match when looking for cinder in the service
-#### catalog. Format is : separated values of the form:
-#### <service_type>:<service_name>:<endpoint_type>
+# the address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
-# cinder_endpoint_template=<None>
-#### (StrOpt) Override service catalog lookup with template for cinder
-#### endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# enable vnc related features (boolean value)
+#vnc_enabled=true
+# keymap for vnc (string value)
+#vnc_keymap=en-us
-######## defined in nova.volume.driver ########
-# volume_group=nova-volumes
-#### (StrOpt) Name for the VG that will contain exported volumes
+#
+# Options defined in nova.vnc.xvp_proxy
+#
-# num_shell_tries=3
-#### (IntOpt) number of times to attempt to run flakey shell commands
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
-# num_iscsi_scan_tries=3
-#### (IntOpt) number of times to rescan iSCSI target to find volume
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
-# iscsi_num_targets=100
-#### (IntOpt) Number of iscsi target ids per host
-# iscsi_target_prefix=iqn.2010-10.org.openstack:
-#### (StrOpt) prefix for iscsi volumes
+#
+# Options defined in nova.volume
+#
-# iscsi_ip_address=$my_ip
-#### (StrOpt) use this ip for iscsi
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
-# iscsi_port=3260
-#### (IntOpt) The port that the iSCSI daemon is listening on
-# rbd_pool=rbd
-#### (StrOpt) the RADOS pool in which rbd volumes are stored
+#
+# Options defined in nova.volume.cinder
+#
-# rbd_user=<None>
-#### (StrOpt) the RADOS client name for accessing rbd volumes
+# Info to match when looking for cinder in the service
+# catalog. Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info=volume:cinder:publicURL
-# rbd_secret_uuid=<None>
-#### (StrOpt) the libvirt uuid of the secret for the rbd_uservolumes
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#cinder_endpoint_template=<None>
-# volume_tmp_dir=<None>
-#### (StrOpt) where to store temporary image files if the volume driver
-#### does not write them directly to the volume
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#cinder_http_retries=3
-######## defined in nova.volume.iscsi ########
+[conductor]
-# iscsi_helper=tgtadm
-#### (StrOpt) iscsi target user-land tool to use
+#
+# Options defined in nova.conductor.api
+#
-# volumes_dir=$state_path/volumes
-#### (StrOpt) Volume configuration file storage directory
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+# the topic conductor nodes listen on (string value)
+#topic=conductor
-######## defined in nova.volume.manager ########
+# full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
-# storage_availability_zone=nova
-#### (StrOpt) availability zone of this service
-# volume_driver=nova.volume.driver.ISCSIDriver
-#### (StrOpt) Driver to use for volume creation
+[cells]
-# use_local_volumes=true
-#### (BoolOpt) if True, will not discover local volumes
+#
+# Options defined in nova.cells.manager
+#
-# volume_force_update_capabilities=false
-#### (BoolOpt) if True will force update capabilities on each check
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
-######## defined in nova.volume.netapp ########
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
-# netapp_wsdl_url=<None>
-#### (StrOpt) URL of the WSDL file for the DFM server
-# netapp_login=<None>
-#### (StrOpt) User name for the DFM server
+#
+# Options defined in nova.cells.messaging
+#
-# netapp_password=<None>
-#### (StrOpt) Password for the DFM server
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
-# netapp_server_hostname=<None>
-#### (StrOpt) Hostname for the DFM server
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
-# netapp_server_port=8088
-#### (IntOpt) Port number for the DFM server
-# netapp_storage_service=<None>
-#### (StrOpt) Storage service to use for provisioning (when
-#### volume_type=None)
+#
+# Options defined in nova.cells.opts
+#
-# netapp_storage_service_prefix=<None>
-#### (StrOpt) Prefix of storage service name to use for provisioning
-#### (volume_type name will be appended)
+# Enable cell functionality (boolean value)
+#enable=false
-# netapp_vfiler=<None>
-#### (StrOpt) Vfiler to use for provisioning
+# the topic cells nodes listen on (string value)
+#topic=cells
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
-######## defined in nova.volume.netapp_nfs ########
+# name of this cell (string value)
+#name=nova
-# synchronous_snapshot_create=0
-#### (IntOpt) Does snapshot creation call returns immediately
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
-# netapp_wsdl_url=<None>
-#### (StrOpt) URL of the WSDL file for the DFM server
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
-# netapp_login=<None>
-#### (StrOpt) User name for the DFM server
-# netapp_password=<None>
-#### (StrOpt) Password for the DFM server
+#
+# Options defined in nova.cells.rpc_driver
+#
-# netapp_server_hostname=<None>
-#### (StrOpt) Hostname for the DFM server
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
-# netapp_server_port=8088
-#### (IntOpt) Port number for the DFM server
-# netapp_storage_service=<None>
-#### (StrOpt) Storage service to use for provisioning (when
-#### volume_type=None)
+#
+# Options defined in nova.cells.scheduler
+#
-# netapp_storage_service_prefix=<None>
-#### (StrOpt) Prefix of storage service name to use for provisioning
-#### (volume_type name will be appended)
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
-# netapp_vfiler=<None>
-#### (StrOpt) Vfiler to use for provisioning
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
-######## defined in nova.volume.nexenta.volume ########
+#
+# Options defined in nova.cells.state
+#
-# nexenta_host=
-#### (StrOpt) IP address of Nexenta SA
+# Seconds between getting fresh cell info from db. (integer
+# value)
+#db_check_interval=60
-# nexenta_rest_port=2000
-#### (IntOpt) HTTP port to connect to Nexenta REST API server
-# nexenta_rest_protocol=auto
-#### (StrOpt) Use http or https for REST connection (default auto)
+[baremetal]
-# nexenta_user=admin
-#### (StrOpt) User name to connect to Nexenta SA
+#
+# Options defined in nova.virt.baremetal.db.sqlalchemy.session
+#
-# nexenta_password=nexenta
-#### (StrOpt) Password to connect to Nexenta SA
+# The SQLAlchemy connection string used to connect to the
+# bare-metal database (string value)
+#sql_connection=sqlite:///$state_path/baremetal_$sqlite_db
-# nexenta_iscsi_target_portal_port=3260
-#### (IntOpt) Nexenta target portal port
-# nexenta_volume=nova
-#### (StrOpt) pool on SA that will hold all volumes
+#
+# Options defined in nova.virt.baremetal.driver
+#
-# nexenta_target_prefix=iqn.1986-03.com.sun:02:nova-
-#### (StrOpt) IQN prefix for iSCSI targets
+# Whether baremetal compute injects password or not (boolean
+# value)
+#inject_password=true
-# nexenta_target_group_prefix=nova/
-#### (StrOpt) prefix for iSCSI target groups on SA
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template
-# nexenta_blocksize=
-#### (StrOpt) block size for volumes (blank=default,8KB)
+# Baremetal VIF driver. (string value)
+#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
-# nexenta_sparse=false
-#### (BoolOpt) flag to create sparse volumes
+# Baremetal volume driver. (string value)
+#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver
+# a list of additional capabilities corresponding to
+# instance_type_extra_specs for this compute host to
+# advertise. Valid entries are name=value pairs. For example,
+# "key1:val1, key2:val2" (list value)
+#instance_type_extra_specs=
-######## defined in nova.volume.nfs ########
+# Baremetal driver back-end (pxe or tilera) (string value)
+#driver=nova.virt.baremetal.pxe.PXE
-# nfs_shares_config=<None>
-#### (StrOpt) File with the list of available nfs shares
+# Baremetal power management method (string value)
+#power_manager=nova.virt.baremetal.ipmi.IPMI
-# nfs_disk_util=df
-#### (StrOpt) Use du or df for free space calculation
+# Baremetal compute node's tftp root path (string value)
+#tftp_root=/tftpboot
-# nfs_sparsed_volumes=true
-#### (BoolOpt) Create volumes as sparsed files which take no space.If set
-#### to False volume is created as regular file.In such case
-#### volume creation takes a lot of time.
+#
+# Options defined in nova.virt.baremetal.ipmi
+#
-######## defined in nova.volume.san ########
+# path to baremetal terminal program (string value)
+#terminal=shellinaboxd
-# san_thin_provision=true
-#### (BoolOpt) Use thin provisioning for SAN volumes?
+# path to baremetal terminal SSL cert(PEM) (string value)
+#terminal_cert_dir=<None>
-# san_ip=
-#### (StrOpt) IP address of SAN controller
+# path to directory stores pidfiles of baremetal_terminal
+# (string value)
+#terminal_pid_dir=$state_path/baremetal/console
-# san_login=admin
-#### (StrOpt) Username for SAN controller
+# maximal number of retries for IPMI operations (integer
+# value)
+#ipmi_power_retry=5
-# san_password=
-#### (StrOpt) Password for SAN controller
-# san_private_key=
-#### (StrOpt) Filename of private key to use for SSH authentication
+#
+# Options defined in nova.virt.baremetal.pxe
+#
-# san_clustername=
-#### (StrOpt) Cluster name to use for creating volumes
+# Default kernel image ID used in deployment phase (string
+# value)
+#deploy_kernel=<None>
-# san_ssh_port=22
-#### (IntOpt) SSH port to use with SAN
+# Default ramdisk image ID used in deployment phase (string
+# value)
+#deploy_ramdisk=<None>
-# san_is_local=false
-#### (BoolOpt) Execute commands locally instead of over SSH; use if the
-#### volume service is running on the SAN device
+# Template file for injected network config (string value)
+#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template
-# san_zfs_volume_base=rpool/
-#### (StrOpt) The ZFS path under which to create zvols for volumes.
+# additional append parameters for baremetal PXE boot (string
+# value)
+#pxe_append_params=<None>
+# Template file for PXE configuration (string value)
+#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
-######## defined in nova.volume.solidfire ########
-# sf_emulate_512=true
-#### (BoolOpt) Set 512 byte emulation on volume creation;
+#
+# Options defined in nova.virt.baremetal.volume_driver
+#
-# sf_mvip=
-#### (StrOpt) IP address of SolidFire MVIP
+# Do not set this out of dev/test environments. If a node does
+# not have a fixed PXE IP address, volumes are exported with
+# globally opened ACL (boolean value)
+#use_unsafe_iscsi=false
-# sf_login=admin
-#### (StrOpt) Username for SF Cluster Admin
+# iSCSI IQN prefix used in baremetal volume connections.
+# (string value)
+#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal
-# sf_password=
-#### (StrOpt) Password for SF Cluster Admin
-# sf_allow_tenant_qos=true
-#### (BoolOpt) Allow tenants to specify QOS on create
+[rpc_notifier2]
+#
+# Options defined in nova.openstack.common.notifier.rpc_notifier2
+#
-######## defined in nova.volume.storwize_svc ########
+# AMQP topic(s) used for openstack notifications (list value)
+#topics=notifications
-# storwize_svc_volpool_name=volpool
-#### (StrOpt) Storage system storage pool for volumes
-# storwize_svc_vol_rsize=2%
-#### (StrOpt) Storage system space-efficiency parameter for volumes
+[trusted_computing]
-# storwize_svc_vol_warning=0
-#### (StrOpt) Storage system threshold for volume capacity warnings
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
-# storwize_svc_vol_autoexpand=true
-#### (BoolOpt) Storage system autoexpand parameter for volumes (True/False)
+# attestation server http (string value)
+#attestation_server=<None>
-# storwize_svc_vol_grainsize=256
-#### (StrOpt) Storage system grain size parameter for volumes
-#### (32/64/128/256)
+# attestation server Cert file for Identity verification
+# (string value)
+#attestation_server_ca_file=<None>
-# storwize_svc_vol_compression=false
-#### (BoolOpt) Storage system compression option for volumes
+# attestation server port (string value)
+#attestation_port=8443
-# storwize_svc_vol_easytier=true
-#### (BoolOpt) Enable Easy Tier for volumes
+# attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
-# storwize_svc_flashcopy_timeout=120
-#### (StrOpt) Maximum number of seconds to wait for FlashCopy to be
-#### prepared. Maximum value is 600 seconds (10 minutes).
+# attestation authorization blob - must change (string value)
+#attestation_auth_blob=<None>
-# Total option count: 527
+# Total option count: 514
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index bd015802a..fd1f9c2e0 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -9,6 +9,7 @@
"compute:create:attach_volume": "",
"compute:create:forced_host": "is_admin:True",
"compute:get_all": "",
+ "compute:get_all_tenants": "",
"admin_api": "is_admin:True",
@@ -27,25 +28,38 @@
"compute_extension:admin_actions:resetState": "rule:admin_api",
"compute_extension:admin_actions:migrate": "rule:admin_api",
"compute_extension:aggregates": "rule:admin_api",
+ "compute_extension:agents": "rule:admin_api",
+ "compute_extension:cells": "rule:admin_api",
"compute_extension:certificates": "",
"compute_extension:cloudpipe": "rule:admin_api",
+ "compute_extension:cloudpipe_update": "rule:admin_api",
"compute_extension:console_output": "",
"compute_extension:consoles": "",
+ "compute_extension:coverage_ext": "rule:admin_api",
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:extended_status": "",
+ "compute_extension:fixed_ips": "rule:admin_api",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
- "compute_extension:flavorextraspecs": "",
+ "compute_extension:flavorextraspecs:index": "",
+ "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavorextraspecs:create": "rule:admin_api",
+ "compute_extension:flavorextraspecs:update": "rule:admin_api",
+ "compute_extension:flavorextraspecs:delete": "rule:admin_api",
"compute_extension:flavormanage": "rule:admin_api",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
+ "compute_extension:floating_ips_bulk": "rule:admin_api",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "rule:admin_api",
+ "compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
@@ -53,12 +67,15 @@
"compute_extension:multinic": "",
"compute_extension:networks": "rule:admin_api",
"compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "rule:admin_api",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "rule:admin_api",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "rule:admin_api",
+ "compute_extension:server_password": "",
+ "compute_extension:services": "rule:admin_api",
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
"compute_extension:users": "rule:admin_api",
diff --git a/etc/nova/release.sample b/etc/nova/release.sample
new file mode 100644
index 000000000..4c0d8e48e
--- /dev/null
+++ b/etc/nova/release.sample
@@ -0,0 +1,4 @@
+[Nova]
+vendor = Fedora Project
+product = OpenStack Nova
+package = 1.fc18
diff --git a/etc/nova/rootwrap.conf b/etc/nova/rootwrap.conf
index 730f71695..fb2997abd 100644
--- a/etc/nova/rootwrap.conf
+++ b/etc/nova/rootwrap.conf
@@ -5,3 +5,23 @@
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitely specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, user0, user1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
diff --git a/etc/nova/rootwrap.d/api-metadata.filters b/etc/nova/rootwrap.d/api-metadata.filters
index ef454cbff..1aa6f83e6 100644
--- a/etc/nova/rootwrap.d/api-metadata.filters
+++ b/etc/nova/rootwrap.d/api-metadata.filters
@@ -5,13 +5,9 @@
[Filters]
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
diff --git a/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters b/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
new file mode 100644
index 000000000..a2858cd11
--- /dev/null
+++ b/etc/nova/rootwrap.d/baremetal-compute-ipmi.filters
@@ -0,0 +1,9 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/virt/baremetal/ipmi.py: 'ipmitool', ..
+ipmitool: CommandFilter, /usr/bin/ipmitool, root
+
+# nova/virt/baremetal/ipmi.py: 'kill', '-TERM', str(console_pid)
+kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM
diff --git a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
new file mode 100644
index 000000000..35fa61723
--- /dev/null
+++ b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
@@ -0,0 +1,11 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+
+# nova/virt/baremetal/pxe.py: 'dnsmasq', ...
+dnsmasq: CommandFilter, /usr/sbin/dnsmasq, root
+
+# nova/virt/baremetal/pxe.py: 'kill', '-TERM', str(dnsmasq_pid)
+kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -15, -TERM
+
diff --git a/etc/nova/rootwrap.d/baremetal-deploy-helper.filters b/etc/nova/rootwrap.d/baremetal-deploy-helper.filters
new file mode 100644
index 000000000..65416bbf8
--- /dev/null
+++ b/etc/nova/rootwrap.d/baremetal-deploy-helper.filters
@@ -0,0 +1,10 @@
+# nova-rootwrap command filters for nova-baremetal-deploy-helper
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova-baremetal-deploy-helper
+iscsiadm: CommandFilter, /sbin/iscsiadm, root
+fdisk: CommandFilter, /sbin/fdisk, root
+dd: CommandFilter, /bin/dd, root
+mkswap: CommandFilter, /sbin/mkswap, root
+blkid: CommandFilter, /sbin/blkid, root
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index db9ee2636..f344a1b1c 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -2,65 +2,50 @@
# This file should be owned by (and only-writeable by) the root user
[Filters]
-# nova/virt/disk/mount.py: 'kpartx', '-a', device
-# nova/virt/disk/mount.py: 'kpartx', '-d', device
+# nova/virt/disk/mount/api.py: 'kpartx', '-a', device
+# nova/virt/disk/mount/api.py: 'kpartx', '-d', device
kpartx: CommandFilter, /sbin/kpartx, root
-# nova/virt/disk/mount.py: 'tune2fs', '-c', 0, '-i', 0, mapped_device
# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
tune2fs: CommandFilter, /sbin/tune2fs, root
-# nova/virt/disk/mount.py: 'mount', mapped_device, mount_dir
+# nova/virt/disk/mount/api.py: 'mount', mapped_device
+# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
# nova/virt/configdrive.py: 'mount', device, mountdir
mount: CommandFilter, /bin/mount, root
-# nova/virt/disk/mount.py: 'umount', mapped_device
+# nova/virt/disk/mount/api.py: 'umount', mapped_device
+# nova/virt/disk/api.py: 'umount' target
# nova/virt/xenapi/vm_utils.py: 'umount', dev_path
# nova/virt/configdrive.py: 'umount', mountdir
umount: CommandFilter, /bin/umount, root
-# nova/virt/disk/nbd.py: 'qemu-nbd', '-c', device, image
-# nova/virt/disk/nbd.py: 'qemu-nbd', '-d', device
+# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
+# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
qemu-nbd: CommandFilter, /usr/bin/qemu-nbd, root
-# nova/virt/disk/loop.py: 'losetup', '--find', '--show', image
-# nova/virt/disk/loop.py: 'losetup', '--detach', device
+# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
+# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
losetup: CommandFilter, /sbin/losetup, root
-# nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-i'
-# nova/virt/disk/guestfs.py: 'guestmount', '--rw', '-a', image, '-m' dev
-guestmount: CommandFilter, /usr/bin/guestmount, root
-
-# nova/virt/disk/guestfs.py: 'fusermount', 'u', mount_dir
-fusermount: CommandFilter, /bin/fusermount, root
-fusermount_usr: CommandFilter, /usr/bin/fusermount, root
-
-# nova/virt/disk/api.py: 'tee', metadata_path
-# nova/virt/disk/api.py: 'tee', '-a', keyfile
-# nova/virt/disk/api.py: 'tee', netfile
+# nova/virt/disk/vfs/localfs.py: 'tee', canonpath
tee: CommandFilter, /usr/bin/tee, root
-# nova/virt/disk/api.py: 'mkdir', '-p', sshdir
-# nova/virt/disk/api.py: 'mkdir', '-p', netdir
+# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
mkdir: CommandFilter, /bin/mkdir, root
-# nova/virt/disk/api.py: 'chown', 'root', sshdir
-# nova/virt/disk/api.py: 'chown', 'root:root', netdir
+# nova/virt/disk/vfs/localfs.py: 'chown'
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
# nova/utils.py: 'chown', owner_uid, path
chown: CommandFilter, /bin/chown, root
-# nova/virt/disk/api.py: 'chmod', '700', sshdir
-# nova/virt/disk/api.py: 'chmod', 755, netdir
+# nova/virt/disk/vfs/localfs.py: 'chmod'
chmod: CommandFilter, /bin/chmod, root
-# nova/virt/disk/api.py: 'cp', os.path.join(fs...
-cp: CommandFilter, /bin/cp, root
-
# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
@@ -87,8 +72,7 @@ ip: CommandFilter, /sbin/ip, root
# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
-tunctl: CommandFilter, /bin/tunctl, root
-tunctl_usr: CommandFilter, /usr/sbin/tunctl, root
+tunctl: CommandFilter, tunctl, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
@@ -102,13 +86,14 @@ ovs-ofctl: CommandFilter, /usr/bin/ovs-ofctl, root
dd: CommandFilter, /bin/dd, root
# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
-iscsiadm: CommandFilter, /sbin/iscsiadm, root
-iscsiadm_usr: CommandFilter, /usr/bin/iscsiadm, root
+iscsiadm: CommandFilter, iscsiadm, root
# nova/virt/xenapi/vm_utils.py: parted, --script, ...
# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
-parted: CommandFilter, /sbin/parted, root
-parted_usr: CommandFilter, /usr/sbin/parted, root
+parted: CommandFilter, parted, root
+
+# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
+pygrub: CommandFilter, /usr/bin/pygrub, root
# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
fdisk: CommandFilter, /sbin/fdisk, root
@@ -120,21 +105,16 @@ e2fsck: CommandFilter, /sbin/e2fsck, root
resize2fs: CommandFilter, /sbin/resize2fs, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
-arping: CommandFilter, /usr/bin/arping, root
-arping_sbin: CommandFilter, /sbin/arping, root
+arping: CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
@@ -148,6 +128,7 @@ kill_radvd: KillFilter, root, /usr/sbin/radvd
# nova/network/linux_net.py: dnsmasq call
dnsmasq: DnsmasqFilter, /usr/sbin/dnsmasq, root
+dnsmasq_deprecated: DeprecatedDnsmasqFilter, /usr/sbin/dnsmasq, root
# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
radvd: CommandFilter, /usr/sbin/radvd, root
@@ -156,8 +137,7 @@ radvd: CommandFilter, /usr/sbin/radvd, root
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
-brctl: CommandFilter, /sbin/brctl, root
-brctl_usr: CommandFilter, /usr/sbin/brctl, root
+brctl: CommandFilter, brctl, root
# nova/virt/libvirt/utils.py: 'mkswap'
# nova/virt/xenapi/vm_utils.py: 'mkswap'
@@ -169,9 +149,8 @@ mkfs: CommandFilter, /sbin/mkfs, root
# nova/virt/libvirt/utils.py: 'qemu-img'
qemu-img: CommandFilter, /usr/bin/qemu-img, root
-# nova/virt/disk/api.py: 'readlink', '-e'
-readlink: CommandFilter, /bin/readlink, root
-readlink_usr: CommandFilter, /usr/bin/readlink, root
+# nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
+readlink: CommandFilter, readlink, root
# nova/virt/disk/api.py: 'touch', target
touch: CommandFilter, /usr/bin/touch, root
@@ -190,3 +169,6 @@ lvs: CommandFilter, /sbin/lvs, root
# nova/virt/libvirt/utils.py:
vgs: CommandFilter, /sbin/vgs, root
+
+# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ...
+tgtadm: CommandFilter, /usr/sbin/tgtadm, root
diff --git a/etc/nova/rootwrap.d/network.filters b/etc/nova/rootwrap.d/network.filters
index c635f12e4..c58bc77e7 100644
--- a/etc/nova/rootwrap.d/network.filters
+++ b/etc/nova/rootwrap.d/network.filters
@@ -34,22 +34,22 @@ ovs-vsctl: CommandFilter, /usr/bin/ovs-vsctl, root
# nova/network/linux_net.py: 'ovs-ofctl', ....
ovs-ofctl: CommandFilter, /usr/bin/ovs-ofctl, root
+# nova/network/linux_net.py: 'ebtables', '-D' ...
+# nova/network/linux_net.py: 'ebtables', '-I' ...
+ebtables: CommandFilter, /sbin/ebtables, root
+ebtables_usr: CommandFilter, /usr/sbin/ebtables, root
+
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
-iptables-save: CommandFilter, /sbin/iptables-save, root
-iptables-save_usr: CommandFilter, /usr/sbin/iptables-save, root
-ip6tables-save: CommandFilter, /sbin/ip6tables-save, root
-ip6tables-save_usr: CommandFilter, /usr/sbin/ip6tables-save, root
+iptables-save: CommandFilter, iptables-save, root
+ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
-iptables-restore: CommandFilter, /sbin/iptables-restore, root
-iptables-restore_usr: CommandFilter, /usr/sbin/iptables-restore, root
-ip6tables-restore: CommandFilter, /sbin/ip6tables-restore, root
-ip6tables-restore_usr: CommandFilter, /usr/sbin/ip6tables-restore, root
+iptables-restore: CommandFilter, iptables-restore, root
+ip6tables-restore: CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
-arping: CommandFilter, /usr/bin/arping, root
-arping_sbin: CommandFilter, /sbin/arping, root
+arping: CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, /usr/bin/dhcp_release, root
@@ -63,6 +63,7 @@ kill_radvd: KillFilter, root, /usr/sbin/radvd
# nova/network/linux_net.py: dnsmasq call
dnsmasq: DnsmasqFilter, /usr/sbin/dnsmasq, root
+dnsmasq_deprecated: DeprecatedDnsmasqFilter, /usr/sbin/dnsmasq, root
# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
radvd: CommandFilter, /usr/sbin/radvd, root
@@ -71,8 +72,7 @@ radvd: CommandFilter, /usr/sbin/radvd, root
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
-brctl: CommandFilter, /sbin/brctl, root
-brctl_usr: CommandFilter, /usr/sbin/brctl, root
+brctl: CommandFilter, brctl, root
# nova/network/linux_net.py: 'sysctl', ....
sysctl: CommandFilter, /sbin/sysctl, root
diff --git a/nova/api/auth.py b/nova/api/auth.py
index 1562aeede..83388d638 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -21,22 +21,29 @@ Common Auth Middleware.
import webob.dec
import webob.exc
-from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
-use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
- default=False,
- help='Treat X-Forwarded-For as the canonical remote address. '
- 'Only enable this if you have a sanitizing proxy.')
+auth_opts = [
+ cfg.BoolOpt('api_rate_limit',
+ default=True,
+ help='whether to rate limit the api'),
+ cfg.StrOpt('auth_strategy',
+ default='noauth',
+ help='The strategy to use for auth: noauth or keystone.'),
+ cfg.BoolOpt('use_forwarded_for',
+ default=False,
+ help='Treat X-Forwarded-For as the canonical remote address. '
+ 'Only enable this if you have a sanitizing proxy.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(auth_opts)
-CONF = config.CONF
-CONF.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)
@@ -69,7 +76,7 @@ class InjectContext(wsgi.Middleware):
class NovaKeystoneContext(wsgi.Middleware):
- """Make a request context from keystone headers"""
+ """Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
@@ -121,7 +128,7 @@ class NovaKeystoneContext(wsgi.Middleware):
return self.application
def _get_roles(self, req):
- """Get the list of roles"""
+ """Get the list of roles."""
if 'X_ROLES' in req.headers:
roles = req.headers.get('X_ROLES', '')
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 1bf1f9f70..7cd7e1c7d 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -31,10 +31,9 @@ from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
-from nova import config
+from nova.common import memorycache
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -72,13 +71,13 @@ ec2_opts = [
help='Time in seconds before ec2 timestamp expires'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
def ec2_error(req, request_id, code, message):
- """Helper to send an ec2_compatible error"""
+ """Helper to send an ec2_compatible error."""
LOG.error(_('%(code)s: %(message)s') % locals())
resp = webob.Response()
resp.status = 400
@@ -163,12 +162,7 @@ class Lockout(wsgi.Middleware):
def __init__(self, application):
"""middleware can use fake for testing."""
- if CONF.memcached_servers:
- import memcache
- else:
- from nova.common import memorycache as memcache
- self.mc = memcache.Client(CONF.memcached_servers,
- debug=0)
+ self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
@@ -257,7 +251,7 @@ class EC2KeystoneAuth(wsgi.Middleware):
roles = [role['name'] for role
in result['access']['user']['roles']]
except (AttributeError, KeyError), e:
- LOG.exception("Keystone failure: %s" % e)
+ LOG.exception(_("Keystone failure: %s") % e)
msg = _("Failure communicating with keystone")
return ec2_error(req, request_id, "Unauthorized", msg)
@@ -332,7 +326,7 @@ class Requestify(wsgi.Middleware):
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
- except KeyError, e:
+ except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 6cd7c4431..ca1302fad 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -26,7 +26,6 @@ from xml.dom import minidom
from nova.api.ec2 import ec2utils
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -42,7 +41,7 @@ def _underscore_to_xmlcase(str):
def _database_to_isoformat(datetimeobj):
- """Return a xs:dateTime parsable string from datatime"""
+ """Return a xs:dateTime parsable string from datatime."""
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9d73673a4..414b2e969 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -27,26 +27,55 @@ import time
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.api import validator
+from nova import availability_zones
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.image import s3
from nova import network
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
+from nova import servicegroup
from nova import utils
from nova import volume
-
-CONF = config.CONF
+ec2_opts = [
+ cfg.StrOpt('ec2_host',
+ default='$my_ip',
+ help='the ip of the ec2 api server'),
+ cfg.StrOpt('ec2_dmz_host',
+ default='$my_ip',
+ help='the internal ip of the ec2 api server'),
+ cfg.IntOpt('ec2_port',
+ default=8773,
+ help='the port of the ec2 api server'),
+ cfg.StrOpt('ec2_scheme',
+ default='http',
+ help='the protocol to use when connecting to the ec2 api '
+ 'server (http, https)'),
+ cfg.StrOpt('ec2_path',
+ default='/services/Cloud',
+ help='the path prefix used to call the ec2 api server'),
+ cfg.ListOpt('region_list',
+ default=[],
+ help='list of region=fqdn pairs separated by commas'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(ec2_opts)
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
+CONF.import_opt('internal_service_availability_zone',
+ 'nova.availability_zones')
LOG = logging.getLogger(__name__)
@@ -55,11 +84,11 @@ QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
if not validator.validate_str()(val):
- raise exception.InvalidInstanceIDMalformed(val)
+ raise exception.InvalidInstanceIDMalformed(val=val)
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
- raise exception.InvalidInstanceIDMalformed(val)
+ raise exception.InvalidInstanceIDMalformed(val=val)
# EC2 API can return the following values as documented in the EC2 API
@@ -82,7 +111,7 @@ _STATE_DESCRIPTION_MAP = {
def _state_description(vm_state, _shutdown_terminate):
- """Map the vm state to the server status string"""
+ """Map the vm state to the server status string."""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
@@ -106,11 +135,10 @@ def _parse_block_device_mapping(bdm):
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
- id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
if ec2_id.startswith('snap-'):
- bdm['snapshot_id'] = id
+ bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
- bdm['volume_id'] = id
+ bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
@@ -121,7 +149,7 @@ def _properties_get_mappings(properties):
def _format_block_device_mapping(bdm):
- """Contruct BlockDeviceMappingItemType
+ """Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
@@ -153,7 +181,7 @@ def _format_block_device_mapping(bdm):
def _format_mappings(properties, result):
- """Format multiple BlockDeviceMappingItemType"""
+ """Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
@@ -177,7 +205,7 @@ def _format_mappings(properties, result):
class CloudController(object):
- """ CloudController provides the critical dispatch between
+ """CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
@@ -190,10 +218,22 @@ class CloudController(object):
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute_api.KeypairAPI()
+ self.servicegroup_api = servicegroup.API()
def __str__(self):
return 'CloudController'
+ def _enforce_valid_instance_ids(self, context, instance_ids):
+ # NOTE(mikal): Amazon's implementation of the EC2 API requires that
+ # _all_ instance ids passed in be valid.
+ instances = {}
+ if instance_ids:
+ for ec2_id in instance_ids:
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ instances[ec2_id] = instance
+ return instances
+
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
@@ -214,19 +254,23 @@ class CloudController(object):
"""Return available and unavailable zones."""
enabled_services = db.service_get_all(context, False)
disabled_services = db.service_get_all(context, True)
+ enabled_services = availability_zones.set_availability_zones(context,
+ enabled_services)
+ disabled_services = availability_zones.set_availability_zones(context,
+ disabled_services)
available_zones = []
- for zone in [service.availability_zone for service
+ for zone in [service['availability_zone'] for service
in enabled_services]:
if not zone in available_zones:
available_zones.append(zone)
not_available_zones = []
- for zone in [service.availability_zone for service in disabled_services
- if not service['availability_zone'] in available_zones]:
- if not zone in not_available_zones:
+ zones = [service['available_zones'] for service in disabled_services
+ if service['available_zones'] not in available_zones]
+ for zone in zones:
+ if zone not in not_available_zones:
not_available_zones.append(zone)
-
return (available_zones, not_available_zones)
def _describe_availability_zones(self, context, **kwargs):
@@ -235,6 +279,9 @@ class CloudController(object):
result = []
for zone in available_zones:
+ # Hide internal_service_availability_zone
+ if zone == CONF.internal_service_availability_zone:
+ continue
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
@@ -248,15 +295,20 @@ class CloudController(object):
# Available services
enabled_services = db.service_get_all(context, False)
+ enabled_services = availability_zones.set_availability_zones(context,
+ enabled_services)
zone_hosts = {}
host_services = {}
for service in enabled_services:
- zone_hosts.setdefault(service.availability_zone, [])
- if not service.host in zone_hosts[service.availability_zone]:
- zone_hosts[service.availability_zone].append(service.host)
+ zone_hosts.setdefault(service['availability_zone'], [])
+ if not service['host'] in zone_hosts[service['availability_zone']]:
+ zone_hosts[service['availability_zone']].append(
+ service['host'])
- host_services.setdefault(service.host, [])
- host_services[service.host].append(service)
+ host_services.setdefault(service['availability_zone'] +
+ service['host'], [])
+ host_services[service['availability_zone'] + service['host']].\
+ append(service)
result = []
for zone in available_zones:
@@ -266,8 +318,8 @@ class CloudController(object):
result.append({'zoneName': '|- %s' % host,
'zoneState': ''})
- for service in host_services[host]:
- alive = utils.service_is_up(service)
+ for service in host_services[zone + host]:
+ alive = self.servicegroup_api.service_is_up(service)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if service['disabled']:
@@ -318,14 +370,34 @@ class CloudController(object):
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
- snapshots = [self._format_snapshot(context, s) for s in snapshots]
- return {'snapshotSet': snapshots}
+
+ formatted_snapshots = []
+ for s in snapshots:
+ formatted = self._format_snapshot(context, s)
+ if formatted:
+ formatted_snapshots.append(formatted)
+ return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
+ # NOTE(mikal): this is just a set of strings in cinder. If they
+ # implement an enum, then we should move this code to use it. The
+ # valid ec2 statuses are "pending", "completed", and "error".
+ status_map = {'new': 'pending',
+ 'creating': 'pending',
+ 'available': 'completed',
+ 'active': 'completed',
+ 'deleting': 'pending',
+ 'deleted': None,
+ 'error': 'error'}
+
+ mapped_status = status_map.get(snapshot['status'], snapshot['status'])
+ if not mapped_status:
+ return None
+
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
- s['status'] = snapshot['status']
+ s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
@@ -449,22 +521,22 @@ class CloudController(object):
def _format_security_group(self, context, group):
g = {}
- g['groupDescription'] = group.description
- g['groupName'] = group.name
- g['ownerId'] = group.project_id
+ g['groupDescription'] = group['description']
+ g['groupName'] = group['name']
+ g['ownerId'] = group['project_id']
g['ipPermissions'] = []
- for rule in group.rules:
+ for rule in group['rules']:
r = {}
r['groups'] = []
r['ipRanges'] = []
- if rule.group_id:
- source_group = rule.grantee_group
- r['groups'] += [{'groupName': source_group.name,
- 'userId': source_group.project_id}]
- if rule.protocol:
- r['ipProtocol'] = rule.protocol.lower()
- r['fromPort'] = rule.from_port
- r['toPort'] = rule.to_port
+ if rule['group_id']:
+ source_group = rule['grantee_group']
+ r['groups'] += [{'groupName': source_group['name'],
+ 'userId': source_group['project_id']}]
+ if rule['protocol']:
+ r['ipProtocol'] = rule['protocol'].lower()
+ r['fromPort'] = rule['from_port']
+ r['toPort'] = rule['to_port']
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
@@ -475,10 +547,10 @@ class CloudController(object):
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
- r['ipProtocol'] = rule.protocol
- r['fromPort'] = rule.from_port
- r['toPort'] = rule.to_port
- r['ipRanges'] += [{'cidrIp': rule.cidr}]
+ r['ipProtocol'] = rule['protocol']
+ r['fromPort'] = rule['from_port']
+ r['toPort'] = rule['to_port']
+ r['ipRanges'] += [{'cidrIp': rule['cidr']}]
g['ipPermissions'] += [r]
return g
@@ -586,7 +658,7 @@ class CloudController(object):
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
- values_for_rule['parent_group_id'] = security_group.id
+ values_for_rule['parent_group_id'] = security_group['id']
rule_ids.append(self.security_group_api.rule_exists(
security_group, values_for_rule))
@@ -619,7 +691,7 @@ class CloudController(object):
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
- values_for_rule['parent_group_id'] = security_group.id
+ values_for_rule['parent_group_id'] = security_group['id']
if self.security_group_api.rule_exists(security_group,
values_for_rule):
err = _('%s - This rule already exists in group')
@@ -687,6 +759,23 @@ class CloudController(object):
return True
+ def get_password_data(self, context, instance_id, **kwargs):
+ # instance_id may be passed in as a list of instances
+ if isinstance(instance_id, list):
+ ec2_id = instance_id[0]
+ else:
+ ec2_id = instance_id
+ validate_ec2_id(ec2_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ output = password.extract_password(instance)
+ # NOTE(vish): this should be timestamp from the metadata fields
+ # but it isn't important enough to implement properly
+ now = timeutils.utcnow()
+ return {"InstanceId": ec2_id,
+ "Timestamp": now,
+ "passwordData": output}
+
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
@@ -931,14 +1020,19 @@ class CloudController(object):
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
+ instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
- instance_id=instance_id)
+ instance_id=instance_id,
+ instance_cache=instances)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
+ instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
- instance_id=instance_id, use_v6=True)
+ instance_id=instance_id,
+ instance_cache=instances,
+ use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
@@ -969,7 +1063,7 @@ class CloudController(object):
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
- """Format InstanceBlockDeviceMappingResponseItemType"""
+ """Format InstanceBlockDeviceMappingResponseItemType."""
root_device_type = 'instance-store'
mapping = []
for bdm in db.block_device_mapping_get_all_by_instance(context,
@@ -1020,23 +1114,30 @@ class CloudController(object):
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
- **search_opts):
+ instances_cache=None, **search_opts):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
+
+ if not instances_cache:
+ instances_cache = {}
+
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instances = []
for ec2_id in instance_id:
- try:
- instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
- ec2_id)
- instance = self.compute_api.get(context, instance_uuid)
- except exception.NotFound:
- continue
- instances.append(instance)
+ if ec2_id in instances_cache:
+ instances.append(instances_cache[ec2_id])
+ else:
+ try:
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
+ ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ except exception.NotFound:
+ continue
+ instances.append(instance)
else:
try:
# always filter out deleted instances
@@ -1046,9 +1147,10 @@ class CloudController(object):
sort_dir='asc')
except exception.NotFound:
instances = []
+
for instance in instances:
if not context.is_admin:
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
@@ -1244,7 +1346,7 @@ class CloudController(object):
return self._format_run_instances(context, resv_id)
def _ec2_ids_to_instances(self, context, instance_id):
- """Get all instances first, to prevent partial executions"""
+ """Get all instances first, to prevent partial executions."""
instances = []
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
@@ -1265,7 +1367,7 @@ class CloudController(object):
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
- """instance_id is a list of instance ids"""
+ """instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 997197d38..cfe0d7879 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -18,10 +18,10 @@
import re
+from nova import availability_zones
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -73,7 +73,7 @@ def glance_id_to_ec2_id(context, glance_id, image_type='ami'):
def ec2_id_to_id(ec2_id):
- """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
+ """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)."""
try:
return int(ec2_id.split('-')[-1], 16)
except ValueError:
@@ -104,7 +104,7 @@ def get_ip_info_for_instance_from_nw_info(nw_info):
def get_ip_info_for_instance(context, instance):
- """Return a dictionary of IP information for an instance"""
+ """Return a dictionary of IP information for an instance."""
info_cache = instance['info_cache'] or {}
cached_nwinfo = info_cache.get('network_info')
@@ -117,18 +117,20 @@ def get_ip_info_for_instance(context, instance):
def get_availability_zone_by_host(services, host):
if len(services) > 0:
- return services[0]['availability_zone']
+ return availability_zones.get_host_availability_zone(context.get_admin_context(), host)
return 'unknown zone'
def id_to_ec2_id(instance_id, template='i-%08x'):
- """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
+ """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])."""
return template % int(instance_id)
def id_to_ec2_inst_id(instance_id):
"""Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
- if uuidutils.is_uuid_like(instance_id):
+ if instance_id is None:
+ return None
+ elif uuidutils.is_uuid_like(instance_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_instance_uuid(ctxt, instance_id)
return id_to_ec2_id(int_id)
diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py
index 331603a3a..9299a946c 100644
--- a/nova/api/ec2/faults.py
+++ b/nova/api/ec2/faults.py
@@ -15,12 +15,11 @@
import webob.dec
import webob.exc
-from nova import config
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
class Fault(webob.exc.HTTPException):
diff --git a/nova/api/manager.py b/nova/api/manager.py
index dc081d9a6..e9b07aa92 100644
--- a/nova/api/manager.py
+++ b/nova/api/manager.py
@@ -16,12 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova import manager
-from nova.openstack.common import importutils
-
-CONF = config.CONF
+from nova.network import driver
class MetadataManager(manager.Manager):
@@ -32,7 +28,7 @@ class MetadataManager(manager.Manager):
"""
def __init__(self, *args, **kwargs):
super(MetadataManager, self).__init__(*args, **kwargs)
- self.network_driver = importutils.import_module(CONF.network_driver)
+ self.network_driver = driver.load_network_driver()
def init_host(self):
"""Perform any initialization.
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 469d87d46..34d412268 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -21,15 +21,16 @@
import base64
import json
import os
+import posixpath
from nova.api.ec2 import ec2utils
+from nova.api.metadata import password
from nova import block_device
-from nova import config
from nova import context
from nova import db
-from nova import flags
from nova import network
from nova.openstack.common import cfg
+from nova.openstack.common import timeutils
from nova.virt import netutils
@@ -41,7 +42,7 @@ metadata_opts = [
'config drive')),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
@@ -58,11 +59,17 @@ VERSIONS = [
'2009-04-04',
]
-OPENSTACK_VERSIONS = ["2012-08-10"]
+FOLSOM = '2012-08-10'
+GRIZZLY = '2013-04-04'
+OPENSTACK_VERSIONS = [
+ FOLSOM,
+ GRIZZLY,
+]
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
UD_NAME = "user_data"
+PASS_NAME = "password"
class InvalidMetadataVersion(Exception):
@@ -129,6 +136,8 @@ class InstanceMetadata():
for item in instance.get('metadata', []):
self.launch_metadata[item['key']] = item['value']
+ self.password = password.extract_password(instance)
+
self.uuid = instance.get('uuid')
self.content = {}
@@ -258,6 +267,8 @@ class InstanceMetadata():
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
+ if self._check_os_version(GRIZZLY, version):
+ ret.append(PASS_NAME)
return ret
if path == UD_NAME:
@@ -265,6 +276,9 @@ class InstanceMetadata():
raise KeyError(path)
return self.userdata_raw
+ if path == PASS_NAME and self._check_os_version(GRIZZLY, version):
+ return password.handle_password
+
if path != MD_JSON_NAME:
raise KeyError(path)
@@ -298,14 +312,20 @@ class InstanceMetadata():
metadata['launch_index'] = self.instance['launch_index']
metadata['availability_zone'] = self.availability_zone
+ if self._check_os_version(GRIZZLY, version):
+ metadata['random_seed'] = base64.b64encode(os.urandom(512))
+
data = {
MD_JSON_NAME: json.dumps(metadata),
}
return data[path]
- def _check_version(self, required, requested):
- return VERSIONS.index(requested) >= VERSIONS.index(required)
+ def _check_version(self, required, requested, versions=VERSIONS):
+ return versions.index(requested) >= versions.index(required)
+
+ def _check_os_version(self, required, requested):
+ return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance['hostname'],
@@ -314,9 +334,9 @@ class InstanceMetadata():
def lookup(self, path):
if path == "" or path[0] != "/":
- path = os.path.normpath("/" + path)
+ path = posixpath.normpath("/" + path)
else:
- path = os.path.normpath(path)
+ path = posixpath.normpath(path)
# fix up requests, prepending /ec2 to anything that does not match
path_tokens = path.split('/')[1:]
@@ -333,7 +353,10 @@ class InstanceMetadata():
# specifically handle the top level request
if len(path_tokens) == 1:
if path_tokens[0] == "openstack":
- versions = OPENSTACK_VERSIONS + ["latest"]
+ # NOTE(vish): don't show versions that are in the future
+ today = timeutils.utcnow().strftime("%Y-%m-%d")
+ versions = [v for v in OPENSTACK_VERSIONS if v <= today]
+ versions += ["latest"]
else:
versions = VERSIONS + ["latest"]
return versions
@@ -385,7 +408,14 @@ def get_metadata_by_address(address):
ctxt = context.get_admin_context()
fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)
- instance = db.instance_get_by_uuid(ctxt, fixed_ip['instance_uuid'])
+ return get_metadata_by_instance_id(fixed_ip['instance_uuid'],
+ address,
+ ctxt)
+
+
+def get_metadata_by_instance_id(instance_id, address, ctxt=None):
+ ctxt = ctxt or context.get_admin_context()
+ instance = db.instance_get_by_uuid(ctxt, instance_id)
return InstanceMetadata(instance, address)
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 14ec696cd..fbb46930b 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -17,36 +17,49 @@
# under the License.
"""Metadata request handler."""
+import hashlib
+import hmac
import os
import webob.dec
import webob.exc
from nova.api.metadata import base
-from nova import config
+from nova.common import memorycache
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi
-CONF = config.CONF
+CACHE_EXPIRATION = 15 # in seconds
+
+CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
-LOG = logging.getLogger(__name__)
+metadata_proxy_opts = [
+ cfg.BoolOpt(
+ 'service_quantum_metadata_proxy',
+ default=False,
+ help='Set flag to indicate Quantum will proxy metadata requests and '
+ 'resolve instance ids.'),
+ cfg.StrOpt(
+ 'quantum_metadata_proxy_shared_secret',
+ default='',
+ help='Shared secret to validate proxied Quantum metadata requests')
+]
-if CONF.memcached_servers:
- import memcache
-else:
- from nova.common import memorycache as memcache
+CONF.register_opts(metadata_proxy_opts)
+
+LOG = logging.getLogger(__name__)
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
- self._cache = memcache.Client(CONF.memcached_servers, debug=0)
+ self._cache = memorycache.get_client()
- def get_metadata(self, address):
+ def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
@@ -60,35 +73,116 @@ class MetadataRequestHandler(wsgi.Application):
except exception.NotFound:
return None
- self._cache.set(cache_key, data, 15)
+ self._cache.set(cache_key, data, CACHE_EXPIRATION)
+
+ return data
+
+ def get_metadata_by_instance_id(self, instance_id, address):
+ cache_key = 'metadata-%s' % instance_id
+ data = self._cache.get(cache_key)
+ if data:
+ return data
+
+ try:
+ data = base.get_metadata_by_instance_id(instance_id, address)
+ except exception.NotFound:
+ return None
+
+ self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
+ if os.path.normpath("/" + req.path_info) == "/":
+ return(base.ec2_md_print(base.VERSIONS + ["latest"]))
+
+ if CONF.service_quantum_metadata_proxy:
+ meta_data = self._handle_instance_id_request(req)
+ else:
+ if req.headers.get('X-Instance-ID'):
+ LOG.warn(
+ _("X-Instance-ID present in request headers. The "
+ "'service_quantum_metadata_proxy' option must be enabled"
+ " to process this header."))
+ meta_data = self._handle_remote_ip_request(req)
+
+ if meta_data is None:
+ raise webob.exc.HTTPNotFound()
+
+ try:
+ data = meta_data.lookup(req.path_info)
+ except base.InvalidMetadataPath:
+ raise webob.exc.HTTPNotFound()
+
+ if callable(data):
+ return data(req, meta_data)
+
+ return base.ec2_md_print(data)
+
+ def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
- if os.path.normpath("/" + req.path_info) == "/":
- return(base.ec2_md_print(base.VERSIONS + ["latest"]))
-
try:
- meta_data = self.get_metadata(remote_address)
+ meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception(_('Failed to get metadata for ip: %s'),
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
- exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg))
- return exc
+ raise webob.exc.HTTPInternalServerError(explanation=unicode(msg))
+
if meta_data is None:
LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
- raise webob.exc.HTTPNotFound()
+
+ return meta_data
+
+ def _handle_instance_id_request(self, req):
+ instance_id = req.headers.get('X-Instance-ID')
+ signature = req.headers.get('X-Instance-ID-Signature')
+ remote_address = req.remote_addr
+
+ # Ensure that only one header was passed
+
+ if instance_id is None:
+ msg = _('X-Instance-ID header is missing from request.')
+ elif not isinstance(instance_id, basestring):
+ msg = _('Multiple X-Instance-ID headers found within request.')
+ else:
+ msg = None
+
+ if msg:
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ expected_signature = hmac.new(
+ CONF.quantum_metadata_proxy_shared_secret,
+ instance_id,
+ hashlib.sha256).hexdigest()
+
+ if expected_signature != signature:
+ if instance_id:
+ w = _('X-Instance-ID-Signature: %(signature)s does not match '
+ 'the expected value: %(expected_signature)s for id: '
+ '%(instance_id)s. Request From: %(remote_address)s')
+ LOG.warn(w % locals())
+
+ msg = _('Invalid proxy request signature.')
+ raise webob.exc.HTTPForbidden(explanation=msg)
try:
- data = meta_data.lookup(req.path_info)
- except base.InvalidMetadataPath:
- raise webob.exc.HTTPNotFound()
+ meta_data = self.get_metadata_by_instance_id(instance_id,
+ remote_address)
+ except Exception:
+ LOG.exception(_('Failed to get metadata for instance id: %s'),
+ instance_id)
+ msg = _('An unknown error has occurred. '
+ 'Please try your request again.')
+ raise webob.exc.HTTPInternalServerError(explanation=unicode(msg))
- return base.ec2_md_print(data)
+ if meta_data is None:
+ LOG.error(_('Failed to get metadata for instance id: %s'),
+ instance_id)
+
+ return meta_data
diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py
new file mode 100644
index 000000000..b2bb83b15
--- /dev/null
+++ b/nova/api/metadata/password.py
@@ -0,0 +1,68 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import context
+from nova import db
+
+
+CHUNKS = 4
+CHUNK_LENGTH = 255
+MAX_SIZE = CHUNKS * CHUNK_LENGTH
+
+
+def extract_password(instance):
+ result = ''
+ for datum in sorted(instance.get('system_metadata', []),
+ key=lambda x: x['key']):
+ if datum['key'].startswith('password_'):
+ result += datum['value']
+ return result or None
+
+
+def set_password(context, instance_uuid, password):
+ """Stores password as system_metadata items.
+
+ Password is stored with the keys 'password_0' -> 'password_3'.
+ """
+ password = password or ''
+ meta = {}
+ for i in xrange(CHUNKS):
+ meta['password_%d' % i] = password[:CHUNK_LENGTH]
+ password = password[CHUNK_LENGTH:]
+ db.instance_system_metadata_update(context,
+ instance_uuid,
+ meta,
+ False)
+
+
+def handle_password(req, meta_data):
+ ctxt = context.get_admin_context()
+ password = meta_data.password
+ if req.method == 'GET':
+ return meta_data.password
+ elif req.method == 'POST':
+ # NOTE(vish): The conflict will only happen once the metadata cache
+ # updates, but it isn't a huge issue if it can be set for
+ # a short window.
+ if meta_data.password:
+ raise exc.HTTPConflict()
+ if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
+ msg = _("Request is too large.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ set_password(ctxt, meta_data.uuid, req.body)
+ else:
+ raise exc.HTTPBadRequest()
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 67ea6e1b5..d812cef18 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -124,7 +124,7 @@ class APIRouter(base_wsgi.Router):
@classmethod
def factory(cls, global_config, **local_config):
- """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
+ """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
return cls()
def __init__(self, ext_mgr=None, init_only=None):
@@ -189,5 +189,5 @@ class APIRouter(base_wsgi.Router):
resource.register_actions(controller)
resource.register_extensions(controller)
- def _setup_routes(self, mapper, ext_mgr):
- raise NotImplementedError
+ def _setup_routes(self, mapper, ext_mgr, init_only):
+ raise NotImplementedError()
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 78064012b..24276b2bd 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -21,14 +21,13 @@ import webob.dec
import webob.exc
from nova.api.openstack import wsgi
-from nova import config
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi as base_wsgi
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 50ac76179..b2583588d 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -28,15 +28,29 @@ from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import quota
+osapi_opts = [
+ cfg.IntOpt('osapi_max_limit',
+ default=1000,
+ help='the maximum number of items returned in a single '
+ 'response from a collection resource'),
+ cfg.StrOpt('osapi_compute_link_prefix',
+ default=None,
+ help='Base URL that will be presented to users in links '
+ 'to the OpenStack Compute API'),
+ cfg.StrOpt('osapi_glance_link_prefix',
+ default=None,
+ help='Base URL that will be presented to users in links '
+ 'to glance resources'),
+]
+CONF = cfg.CONF
+CONF.register_opts(osapi_opts)
LOG = logging.getLogger(__name__)
-CONF = config.CONF
QUOTAS = quota.QUOTAS
@@ -132,7 +146,7 @@ def get_pagination_params(request):
def _get_limit_param(request):
- """Extract integer limit from request or fail"""
+ """Extract integer limit from request or fail."""
try:
limit = int(request.GET['limit'])
except ValueError:
@@ -145,7 +159,7 @@ def _get_limit_param(request):
def _get_marker_param(request):
- """Extract marker id from request or fail"""
+ """Extract marker id from request or fail."""
return request.GET['marker']
@@ -187,7 +201,7 @@ def limited(items, request, max_limit=CONF.osapi_max_limit):
def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
- """get limited parameter from request"""
+ """get limited parameter from request."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
@@ -358,7 +372,7 @@ class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
def extract_metadata(self, metadata_node):
- """Marshal the metadata attribute of a parsed request"""
+ """Marshal the metadata attribute of a parsed request."""
if metadata_node is None:
return {}
metadata = {}
@@ -443,8 +457,7 @@ class ViewBuilder(object):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
- prefix = self._update_link_prefix(request.application_url,
- CONF.osapi_compute_link_prefix)
+ prefix = self._update_compute_link_prefix(request.application_url)
url = os.path.join(prefix,
request.environ["nova.context"].project_id,
collection_name)
@@ -452,8 +465,7 @@ class ViewBuilder(object):
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
- prefix = self._update_link_prefix(request.application_url,
- CONF.osapi_compute_link_prefix)
+ prefix = self._update_compute_link_prefix(request.application_url)
return os.path.join(prefix,
request.environ["nova.context"].project_id,
collection_name,
@@ -462,8 +474,7 @@ class ViewBuilder(object):
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
- base_url = self._update_link_prefix(base_url,
- CONF.osapi_compute_link_prefix)
+ base_url = self._update_compute_link_prefix(base_url)
return os.path.join(base_url,
request.environ["nova.context"].project_id,
collection_name,
@@ -500,3 +511,11 @@ class ViewBuilder(object):
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return urlparse.urlunsplit(url_parts)
+
+ def _update_glance_link_prefix(self, orig_url):
+ return self._update_link_prefix(orig_url,
+ CONF.osapi_glance_link_prefix)
+
+ def _update_compute_link_prefix(self, orig_url):
+ return self._update_link_prefix(orig_url,
+ CONF.osapi_compute_link_prefix)
diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py
index e6704951f..92c84c13f 100644
--- a/nova/api/openstack/compute/__init__.py
+++ b/nova/api/openstack/compute/__init__.py
@@ -31,8 +31,6 @@ from nova.api.openstack.compute import limits
from nova.api.openstack.compute import server_metadata
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import versions
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -43,7 +41,7 @@ allow_instance_snapshots_opt = cfg.BoolOpt('allow_instance_snapshots',
default=True,
help='Permit instance snapshot operations.')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(allow_instance_snapshots_opt)
@@ -59,7 +57,8 @@ class APIRouter(nova.api.openstack.APIRouter):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
- action='show')
+ action='show',
+ conditions={"method": ['GET']})
mapper.redirect("", "/")
diff --git a/nova/api/openstack/compute/consoles.py b/nova/api/openstack/compute/consoles.py
index c0097b0eb..613a375f2 100644
--- a/nova/api/openstack/compute/consoles.py
+++ b/nova/api/openstack/compute/consoles.py
@@ -25,7 +25,7 @@ from nova import exception
def _translate_keys(cons):
- """Coerces a console instance into proper dictionary format """
+ """Coerces a console instance into proper dictionary format."""
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type']}
@@ -80,14 +80,14 @@ class ConsolesTemplate(xmlutil.TemplateBuilder):
class Controller(object):
- """The Consoles controller for the OpenStack API"""
+ """The Consoles controller for the OpenStack API."""
def __init__(self):
self.console_api = console_api.API()
@wsgi.serializers(xml=ConsolesTemplate)
def index(self, req, server_id):
- """Returns a list of consoles for this instance"""
+ """Returns a list of consoles for this instance."""
consoles = self.console_api.get_consoles(
req.environ['nova.context'],
server_id)
@@ -95,14 +95,14 @@ class Controller(object):
for console in consoles])
def create(self, req, server_id):
- """Creates a new console"""
+ """Creates a new console."""
self.console_api.create_console(
req.environ['nova.context'],
server_id)
@wsgi.serializers(xml=ConsoleTemplate)
def show(self, req, server_id, id):
- """Shows in-depth information on a specific console"""
+ """Shows in-depth information on a specific console."""
try:
console = self.console_api.get_console(
req.environ['nova.context'],
@@ -113,11 +113,11 @@ class Controller(object):
return _translate_detail_keys(console)
def update(self, req, server_id, id):
- """You can't update a console"""
+ """You can't update a console."""
raise exc.HTTPNotImplemented()
def delete(self, req, server_id, id):
- """Deletes a console"""
+ """Deletes a console."""
try:
self.console_api.delete_console(req.environ['nova.context'],
server_id,
diff --git a/nova/api/openstack/compute/contrib/__init__.py b/nova/api/openstack/compute/contrib/__init__.py
index e6a1e9c4d..7b21a0be8 100644
--- a/nova/api/openstack/compute/contrib/__init__.py
+++ b/nova/api/openstack/compute/contrib/__init__.py
@@ -22,12 +22,19 @@ It can't be called 'extensions' because that causes namespacing problems.
"""
from nova.api.openstack import extensions
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
+ext_opts = [
+ cfg.ListOpt('osapi_compute_ext_list',
+ default=[],
+ help='Specify list of extensions to load when using osapi_'
+ 'compute_extension option with nova.api.openstack.'
+ 'compute.contrib.select_extensions'),
+]
+CONF = cfg.CONF
+CONF.register_opts(ext_opts)
-CONF = config.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 1bac0851d..fa7836b37 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -24,7 +24,6 @@ from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('pause')
def _pause(self, req, id, body):
- """Permit Admins to pause the server"""
+ """Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
try:
@@ -64,7 +63,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('unpause')
def _unpause(self, req, id, body):
- """Permit Admins to unpause the server"""
+ """Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
try:
@@ -81,7 +80,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('suspend')
def _suspend(self, req, id, body):
- """Permit admins to suspend the server"""
+ """Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, 'suspend')
try:
@@ -98,7 +97,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('resume')
def _resume(self, req, id, body):
- """Permit admins to resume the server from suspend"""
+ """Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, 'resume')
try:
@@ -115,7 +114,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('migrate')
def _migrate(self, req, id, body):
- """Permit admins to migrate a server to a new host"""
+ """Permit admins to migrate a server to a new host."""
context = req.environ['nova.context']
authorize(context, 'migrate')
try:
@@ -131,7 +130,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
- """Permit admins to reset networking on an server"""
+ """Permit admins to reset networking on an server."""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
try:
@@ -145,7 +144,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
- """Permit admins to inject network info into a server"""
+ """Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, 'injectNetworkInfo')
try:
@@ -161,7 +160,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('lock')
def _lock(self, req, id, body):
- """Permit admins to lock a server"""
+ """Permit admins to lock a server."""
context = req.environ['nova.context']
authorize(context, 'lock')
try:
@@ -177,7 +176,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('unlock')
def _unlock(self, req, id, body):
- """Permit admins to lock a server"""
+ """Permit admins to lock a server."""
context = req.environ['nova.context']
authorize(context, 'unlock')
try:
@@ -229,6 +228,10 @@ class AdminActionsController(wsgi.Controller):
except ValueError:
msg = _("createBackup attribute 'rotation' must be an integer")
raise exc.HTTPBadRequest(explanation=msg)
+ if rotation < 0:
+ msg = _("createBackup attribute 'rotation' must be greater "
+ "than or equal to zero")
+ raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
@@ -251,17 +254,19 @@ class AdminActionsController(wsgi.Controller):
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createBackup')
- # build location of newly-created image entity
- image_id = str(image['id'])
- image_ref = os.path.join(req.application_url, 'images', image_id)
-
resp = webob.Response(status_int=202)
- resp.headers['Location'] = image_ref
+
+ # build location of newly-created image entity if rotation is not zero
+ if rotation > 0:
+ image_id = str(image['id'])
+ image_ref = os.path.join(req.application_url, 'images', image_id)
+ resp.headers['Location'] = image_ref
+
return resp
@wsgi.action('os-migrateLive')
def _migrate_live(self, req, id, body):
- """Permit admins to (live) migrate a server to a new host"""
+ """Permit admins to (live) migrate a server to a new host."""
context = req.environ["nova.context"]
authorize(context, 'migrateLive')
@@ -302,9 +307,7 @@ class AdminActionsController(wsgi.Controller):
try:
instance = self.compute_api.get(context, id)
- self.compute_api.update(context, instance,
- vm_state=state,
- task_state=None)
+ self.compute_api.update_state(context, instance, state)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
diff --git a/nova/api/openstack/compute/contrib/agents.py b/nova/api/openstack/compute/contrib/agents.py
new file mode 100644
index 000000000..6590deafe
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/agents.py
@@ -0,0 +1,171 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import db
+from nova import exception
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'agents')
+
+
+class AgentsIndexTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('agents')
+ elem = xmlutil.SubTemplateElement(root, 'agent', selector='agents')
+ elem.set('hypervisor')
+ elem.set('os')
+ elem.set('architecture')
+ elem.set('version')
+ elem.set('md5hash')
+ elem.set('agent_id')
+ elem.set('url')
+
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class AgentController(object):
+ """
+    This extension manages guest agents. The host can use this for
+ things like accessing files on the disk, configuring networking,
+ or running other applications/scripts in the guest while it is
+ running. Typically this uses some hypervisor-specific transport
+ to avoid being dependent on a working network configuration.
+    Xen, VMware, and VirtualBox have guest agents, although the Xen
+ driver is the only one with an implementation for managing them
+ in openstack. KVM doesn't really have a concept of a guest agent
+ (although one could be written).
+
+ You can find the design of agent update in this link:
+ http://wiki.openstack.org/AgentUpdate
+ and find the code in nova.virt.xenapi.vmops.VMOps._boot_new_instance.
+    In this design we need to update the agent in the guest from the host, so we need
+ some interfaces to update the agent info in host.
+
+ You can find more information about the design of the GuestAgent in
+ the following link:
+ http://wiki.openstack.org/GuestAgent
+ http://wiki.openstack.org/GuestAgentXenStoreCommunication
+ """
+ @wsgi.serializers(xml=AgentsIndexTemplate)
+ def index(self, req):
+ """
+ Return a list of all agent builds. Filter by hypervisor.
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ hypervisor = None
+ agents = []
+ if 'hypervisor' in req.GET:
+ hypervisor = req.GET['hypervisor']
+
+ for agent_build in db.agent_build_get_all(context, hypervisor):
+ agents.append({'hypervisor': agent_build.hypervisor,
+ 'os': agent_build.os,
+ 'architecture': agent_build.architecture,
+ 'version': agent_build.version,
+ 'md5hash': agent_build.md5hash,
+ 'agent_id': agent_build.id,
+ 'url': agent_build.url})
+
+ return {'agents': agents}
+
+ def update(self, req, id, body):
+ """Update an existing agent build."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ para = body['para']
+ url = para['url']
+ md5hash = para['md5hash']
+ version = para['version']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ try:
+ db.agent_build_update(context, id,
+ {'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+ except exception.AgentBuildNotFound as ex:
+ raise webob.exc.HTTPNotFound(explanation=str(ex))
+
+ return {"agent": {'agent_id': id, 'version': version,
+ 'url': url, 'md5hash': md5hash}}
+
+ def delete(self, req, id):
+ """Deletes an existing agent build."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ db.agent_build_destroy(context, id)
+ except exception.AgentBuildNotFound as ex:
+ raise webob.exc.HTTPNotFound(explanation=str(ex))
+
+ def create(self, req, body):
+ """Creates a new agent build."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ agent = body['agent']
+ hypervisor = agent['hypervisor']
+ os = agent['os']
+ architecture = agent['architecture']
+ version = agent['version']
+ url = agent['url']
+ md5hash = agent['md5hash']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ try:
+ agent_build_ref = db.agent_build_create(context,
+ {'hypervisor': hypervisor,
+ 'os': os,
+ 'architecture': architecture,
+ 'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+ agent['agent_id'] = agent_build_ref.id
+ except Exception as ex:
+ raise webob.exc.HTTPServerError(str(ex))
+ return {'agent': agent}
+
+
+class Agents(extensions.ExtensionDescriptor):
+ """Agents support."""
+
+ name = "Agents"
+ alias = "os-agents"
+ namespace = "http://docs.openstack.org/compute/ext/agents/api/v2"
+ updated = "2012-10-28T00:00:00-00:00"
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('os-agents',
+ AgentController())
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py
index 9435f5980..91d138be4 100644
--- a/nova/api/openstack/compute/contrib/aggregates.py
+++ b/nova/api/openstack/compute/contrib/aggregates.py
@@ -71,11 +71,12 @@ class AggregateController(object):
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
- except (exception.AggregateNameExists,
- exception.InvalidAggregateAction):
- LOG.info(_("Cannot create aggregate with name %(name)s and "
- "availability zone %(avail_zone)s") % locals())
+ except exception.AggregateNameExists as e:
+ LOG.info(e)
raise exc.HTTPConflict
+ except exception.InvalidAggregateAction as e:
+ LOG.info(e)
+ raise
return self._marshall_aggregate(aggregate)
def show(self, req, id):
@@ -202,7 +203,7 @@ class AggregateController(object):
class Aggregates(extensions.ExtensionDescriptor):
- """Admin-only aggregate administration"""
+ """Admin-only aggregate administration."""
name = "Aggregates"
alias = "os-aggregates"
diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py
index 524f454ea..2955b68eb 100644
--- a/nova/api/openstack/compute/contrib/availability_zone.py
+++ b/nova/api/openstack/compute/contrib/availability_zone.py
@@ -18,7 +18,7 @@ from nova.api.openstack import extensions
class Availability_zone(extensions.ExtensionDescriptor):
- """Add availability_zone to the Create Server v1.1 API"""
+ """Add availability_zone to the Create Server v1.1 API."""
name = "AvailabilityZone"
alias = "os-availability-zone"
diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py
new file mode 100644
index 000000000..03e2e4ca2
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/cells.py
@@ -0,0 +1,303 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The cells extension."""
+from xml.dom import minidom
+from xml.parsers import expat
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova.compute import api as compute
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
+
+authorize = extensions.extension_authorizer('compute', 'cells')
+
+
+def make_cell(elem):
+ elem.set('name')
+ elem.set('username')
+ elem.set('type')
+ elem.set('rpc_host')
+ elem.set('rpc_port')
+
+ caps = xmlutil.SubTemplateElement(elem, 'capabilities',
+ selector='capabilities')
+ cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
+ selector=xmlutil.get_items)
+ cap.text = 1
+
+
+cell_nsmap = {None: wsgi.XMLNS_V10}
+
+
+class CellTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('cell', selector='cell')
+ make_cell(root)
+ return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
+
+
+class CellsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('cells')
+ elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
+ make_cell(elem)
+ return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
+
+
+class CellDeserializer(wsgi.XMLDeserializer):
+ """Deserializer to handle xml-formatted cell create requests."""
+
+ def _extract_capabilities(self, cap_node):
+ caps = {}
+ for cap in cap_node.childNodes:
+ cap_name = cap.tagName
+ caps[cap_name] = self.extract_text(cap)
+ return caps
+
+ def _extract_cell(self, node):
+ cell = {}
+ cell_node = self.find_first_child_named(node, 'cell')
+
+ extract_fns = {'capabilities': self._extract_capabilities}
+
+ for child in cell_node.childNodes:
+ name = child.tagName
+ extract_fn = extract_fns.get(name, self.extract_text)
+ cell[name] = extract_fn(child)
+ return cell
+
+ def default(self, string):
+ """Deserialize an xml-formatted cell create request."""
+ try:
+ node = minidom.parseString(string)
+ except expat.ExpatError:
+ msg = _("cannot understand XML")
+ raise exception.MalformedRequestBody(reason=msg)
+
+ return {'body': {'cell': self._extract_cell(node)}}
+
+
+def _filter_keys(item, keys):
+ """
+ Filters all model attributes except for keys
+ item is a dict
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
+def _scrub_cell(cell, detail=False):
+ keys = ['name', 'username', 'rpc_host', 'rpc_port']
+ if detail:
+ keys.append('capabilities')
+
+ cell_info = _filter_keys(cell, keys)
+ cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
+ return cell_info
+
+
+class Controller(object):
+ """Controller for Cell resources."""
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _get_cells(self, ctxt, req, detail=False):
+ """Return all cells."""
+ # Ask the CellsManager for the most recent data
+ items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
+ items = common.limited(items, req)
+ items = [_scrub_cell(item, detail=detail) for item in items]
+ return dict(cells=items)
+
+ @wsgi.serializers(xml=CellsTemplate)
+ def index(self, req):
+ """Return all cells in brief."""
+ ctxt = req.environ['nova.context']
+ authorize(ctxt)
+ return self._get_cells(ctxt, req)
+
+ @wsgi.serializers(xml=CellsTemplate)
+ def detail(self, req):
+ """Return all cells in detail."""
+ ctxt = req.environ['nova.context']
+ authorize(ctxt)
+ return self._get_cells(ctxt, req, detail=True)
+
+ @wsgi.serializers(xml=CellTemplate)
+ def info(self, req):
+ """Return name and capabilities for this cell."""
+ context = req.environ['nova.context']
+ authorize(context)
+ cell_capabs = {}
+ my_caps = CONF.cells.capabilities
+ for cap in my_caps:
+ key, value = cap.split('=')
+ cell_capabs[key] = value
+ cell = {'name': CONF.cells.name,
+ 'type': 'self',
+ 'rpc_host': None,
+ 'rpc_port': 0,
+ 'username': None,
+ 'capabilities': cell_capabs}
+ return dict(cell=cell)
+
+ @wsgi.serializers(xml=CellTemplate)
+ def show(self, req, id):
+ """Return data about the given cell name. 'id' is a cell name."""
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ cell = db.cell_get(context, id)
+ except exception.CellNotFound:
+ raise exc.HTTPNotFound()
+ return dict(cell=_scrub_cell(cell))
+
+ def delete(self, req, id):
+ """Delete a child or parent cell entry. 'id' is a cell name."""
+ context = req.environ['nova.context']
+ authorize(context)
+ num_deleted = db.cell_delete(context, id)
+ if num_deleted == 0:
+ raise exc.HTTPNotFound()
+ return {}
+
+ def _validate_cell_name(self, cell_name):
+ """Validate cell name is not empty and doesn't contain '!' or '.'."""
+ if not cell_name:
+ msg = _("Cell name cannot be empty")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ if '!' in cell_name or '.' in cell_name:
+ msg = _("Cell name cannot contain '!' or '.'")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ def _validate_cell_type(self, cell_type):
+ """Validate cell_type is 'parent' or 'child'."""
+ if cell_type not in ['parent', 'child']:
+ msg = _("Cell type must be 'parent' or 'child'")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ def _convert_cell_type(self, cell):
+ """Convert cell['type'] to is_parent boolean."""
+ if 'type' in cell:
+ self._validate_cell_type(cell['type'])
+ cell['is_parent'] = cell['type'] == 'parent'
+ del cell['type']
+ else:
+ cell['is_parent'] = False
+
+ @wsgi.serializers(xml=CellTemplate)
+ @wsgi.deserializers(xml=CellDeserializer)
+ def create(self, req, body):
+ """Create a child cell entry."""
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'cell' not in body:
+ msg = _("No cell information in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ cell = body['cell']
+ if 'name' not in cell:
+ msg = _("No cell name in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ self._validate_cell_name(cell['name'])
+ self._convert_cell_type(cell)
+ cell = db.cell_create(context, cell)
+ return dict(cell=_scrub_cell(cell))
+
+ @wsgi.serializers(xml=CellTemplate)
+ @wsgi.deserializers(xml=CellDeserializer)
+ def update(self, req, id, body):
+ """Update a child cell entry. 'id' is the cell name to update."""
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'cell' not in body:
+ msg = _("No cell information in request")
+ LOG.error(msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ cell = body['cell']
+ cell.pop('id', None)
+ if 'name' in cell:
+ self._validate_cell_name(cell['name'])
+ self._convert_cell_type(cell)
+ try:
+ cell = db.cell_update(context, id, cell)
+ except exception.CellNotFound:
+ raise exc.HTTPNotFound()
+ return dict(cell=_scrub_cell(cell))
+
+ def sync_instances(self, req, body):
+ """Tell all cells to sync instance info."""
+ context = req.environ['nova.context']
+ authorize(context)
+ project_id = body.pop('project_id', None)
+ deleted = body.pop('deleted', False)
+ updated_since = body.pop('updated_since', None)
+ if body:
+            msg = _("Only 'project_id', 'deleted' and 'updated_since' "
+ raise exc.HTTPBadRequest(explanation=msg)
+ if updated_since:
+ try:
+ timeutils.parse_isotime(updated_since)
+ except ValueError:
+                msg = _('Invalid updated_since value')
+ raise exc.HTTPBadRequest(explanation=msg)
+ self.cells_rpcapi.sync_instances(context, project_id=project_id,
+ updated_since=updated_since, deleted=deleted)
+
+
+class Cells(extensions.ExtensionDescriptor):
+ """Enables cells-related functionality such as adding neighbor cells,
+ listing neighbor cells, and getting the capabilities of the local cell.
+ """
+
+ name = "Cells"
+ alias = "os-cells"
+ namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
+ updated = "2011-09-21T00:00:00+00:00"
+
+ def get_resources(self):
+ coll_actions = {
+ 'detail': 'GET',
+ 'info': 'GET',
+ 'sync_instances': 'POST',
+ }
+
+ res = extensions.ResourceExtension('os-cells',
+ Controller(), collection_actions=coll_actions)
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py
index c05a208a3..d2ce7bc5d 100644
--- a/nova/api/openstack/compute/contrib/certificates.py
+++ b/nova/api/openstack/compute/contrib/certificates.py
@@ -20,7 +20,6 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.cert.rpcapi
-from nova import flags
from nova import network
from nova.openstack.common import log as logging
@@ -89,7 +88,7 @@ class CertificatesController(object):
class Certificates(extensions.ExtensionDescriptor):
- """Certificates support"""
+ """Certificates support."""
name = "Certificates"
alias = "os-certificates"
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index 77d88144a..4e224be46 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -21,17 +21,16 @@ from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova import network
+from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
@@ -73,9 +72,11 @@ class CloudpipeController(object):
fileutils.ensure_tree(CONF.keys_path)
def _get_all_cloudpipes(self, context):
- """Get all cloudpipes"""
- return [instance for instance in self.compute_api.get_all(context)
- if instance['image_ref'] == str(CONF.vpn_image_id)
+ """Get all cloudpipes."""
+ instances = self.compute_api.get_all(context,
+ search_opts={'deleted': False})
+ return [instance for instance in instances
+ if pipelib.is_vpn_image(instance['image_ref'])
and instance['vm_state'] != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context, project_id):
diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py
new file mode 100644
index 000000000..f6ed7bb3a
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py
@@ -0,0 +1,76 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import db
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'cloudpipe_update')
+
+
+class CloudpipeUpdateController(wsgi.Controller):
+ """Handle updating the vpn ip/port for cloudpipe instances."""
+
+ def __init__(self):
+ super(CloudpipeUpdateController, self).__init__()
+
+ @wsgi.action("update")
+ def update(self, req, id, body):
+ """Configure cloudpipe parameters for the project."""
+
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if id != "configure-project":
+ msg = _("Unknown action %s") % id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ project_id = context.project_id
+
+ try:
+ params = body['configure_project']
+ vpn_ip = params['vpn_ip']
+ vpn_port = params['vpn_port']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ networks = db.project_get_networks(context, project_id)
+ for network in networks:
+ db.network_update(context, network['id'],
+ {'vpn_public_address': vpn_ip,
+ 'vpn_public_port': int(vpn_port)})
+ return webob.exc.HTTPAccepted()
+
+
+class Cloudpipe_update(extensions.ExtensionDescriptor):
+ """Adds the ability to set the vpn ip/port for cloudpipe instances."""
+
+ name = "CloudpipeUpdate"
+ alias = "os-cloudpipe-update"
+ namespace = "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2"
+ updated = "2012-11-14T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = CloudpipeUpdateController()
+ extension = extensions.ControllerExtension(self, 'os-cloudpipe',
+ controller)
+ return [extension]
diff --git a/nova/api/openstack/compute/contrib/config_drive.py b/nova/api/openstack/compute/contrib/config_drive.py
index ac294f660..5f1f7b16d 100644
--- a/nova/api/openstack/compute/contrib/config_drive.py
+++ b/nova/api/openstack/compute/contrib/config_drive.py
@@ -15,13 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Config Drive extension"""
+"""Config Drive extension."""
from nova.api.openstack.compute import servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
authorize = extensions.soft_extension_authorizer('compute', 'config_drive')
@@ -72,7 +71,7 @@ class Controller(servers.Controller):
class Config_drive(extensions.ExtensionDescriptor):
- """Config Drive Extension"""
+ """Config Drive Extension."""
name = "ConfigDrive"
alias = "os-config-drive"
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
new file mode 100644
index 000000000..4b7d4e57f
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -0,0 +1,267 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License
+
+# See: http://wiki.openstack.org/Nova/CoverageExtension for more information
+# and usage explanation for this API extension
+
+import os
+import re
+import sys
+import telnetlib
+import tempfile
+
+from coverage import coverage
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova.cert import rpcapi as cert_api
+from nova.compute import api as compute_api
+from nova.conductor import api as conductor_api
+from nova.console import api as console_api
+from nova.consoleauth import rpcapi as consoleauth_api
+from nova import db
+from nova.network import api as network_api
+from nova.openstack.common import log as logging
+from nova.scheduler import rpcapi as scheduler_api
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'coverage_ext')
+
+
+class CoverageController(object):
+ """The Coverage report API controller for the OpenStack API."""
+ def __init__(self):
+ self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
+ data_out = os.path.join(self.data_path, '.nova-coverage')
+ self.coverInst = coverage(data_file=data_out)
+ self.compute_api = compute_api.API()
+ self.network_api = network_api.API()
+ self.conductor_api = conductor_api.API()
+ self.consoleauth_api = consoleauth_api.ConsoleAuthAPI()
+ self.console_api = console_api.API()
+ self.scheduler_api = scheduler_api.SchedulerAPI()
+ self.cert_api = cert_api.CertAPI()
+ self.services = []
+ self.combine = False
+ super(CoverageController, self).__init__()
+
+ def _find_services(self, req):
+ """Returns a list of services."""
+ context = req.environ['nova.context']
+ services = db.service_get_all(context, False)
+ hosts = []
+ for serv in services:
+ hosts.append({"service": serv["topic"], "host": serv["host"]})
+ return hosts
+
+ def _find_ports(self, req, hosts):
+ """Return a list of backdoor ports for all services in the list."""
+ context = req.environ['nova.context']
+
+ apicommands = {
+ "compute": self.compute_api.get_backdoor_port,
+ "network": self.network_api.get_backdoor_port,
+ "conductor": self.conductor_api.get_backdoor_port,
+ "consoleauth": self.consoleauth_api.get_backdoor_port,
+ "console": self.console_api.get_backdoor_port,
+ "scheduler": self.scheduler_api.get_backdoor_port,
+ "cert": self.cert_api.get_backdoor_port,
+ }
+ ports = []
+ #TODO(mtreinish): Figure out how to bind the backdoor socket to 0.0.0.0
+ # Currently this will only work if the host is resolved as loopback on
+ # the same host as api-server
+ for host in hosts:
+ if host['service'] in apicommands:
+ get_port_fn = apicommands[host['service']]
+ _host = host
+ _host['port'] = get_port_fn(context, host['host'])
+ #NOTE(mtreinish): if the port is None then it wasn't set in
+ # the configuration file for this service. However, that
+ # doesn't necessarily mean that we don't have backdoor ports
+ # for all the services. So, skip the telnet connection for
+ # this service.
+ if _host['port']:
+ ports.append(_host)
+ else:
+                    LOG.warning(_("Can't connect to service: %s, no port "
+ "specified\n"), host['service'])
+ else:
+ LOG.debug(_("No backdoor API command for service: %s\n"), host)
+ return ports
+
+ def _start_coverage_telnet(self, tn, service):
+ tn.write('import sys\n')
+ tn.write('from coverage import coverage\n')
+ if self.combine:
+ data_file = os.path.join(self.data_path,
+ '.nova-coverage.%s' % str(service))
+            tn.write("coverInst = coverage(data_file='%s')\n" % data_file)
+ else:
+ tn.write('coverInst = coverage()\n')
+ tn.write('coverInst.skipModules = sys.modules.keys()\n')
+ tn.write("coverInst.start()\n")
+ tn.write("print 'finished'\n")
+ tn.expect([re.compile('finished')])
+
+ def _start_coverage(self, req, body):
+ '''Begin recording coverage information.'''
+ LOG.debug(_("Coverage begin"))
+ body = body['start']
+ self.combine = False
+ if 'combine' in body.keys():
+ self.combine = bool(body['combine'])
+ self.coverInst.skipModules = sys.modules.keys()
+ self.coverInst.start()
+ hosts = self._find_services(req)
+ ports = self._find_ports(req, hosts)
+ self.services = []
+ for service in ports:
+ service['telnet'] = telnetlib.Telnet(service['host'],
+ service['port'])
+ self.services.append(service)
+ self._start_coverage_telnet(service['telnet'], service['service'])
+
+ def _stop_coverage_telnet(self, tn):
+ tn.write("coverInst.stop()\n")
+ tn.write("coverInst.save()\n")
+ tn.write("print 'finished'\n")
+ tn.expect([re.compile('finished')])
+
+ def _check_coverage(self):
+ try:
+ self.coverInst.stop()
+ self.coverInst.save()
+ except AssertionError:
+ return True
+ return False
+
+ def _stop_coverage(self, req):
+ for service in self.services:
+ self._stop_coverage_telnet(service['telnet'])
+ if self._check_coverage():
+ msg = _("Coverage not running")
+ raise exc.HTTPNotFound(explanation=msg)
+ return {'path': self.data_path}
+
+ def _report_coverage_telnet(self, tn, path, xml=False):
+ if xml:
+ execute = str("coverInst.xml_report(outfile='%s')\n" % path)
+ tn.write(execute)
+ tn.write("print 'finished'\n")
+ tn.expect([re.compile('finished')])
+ else:
+ execute = str("output = open('%s', 'w')\n" % path)
+ tn.write(execute)
+ tn.write("coverInst.report(file=output)\n")
+ tn.write("output.close()\n")
+ tn.write("print 'finished'\n")
+ tn.expect([re.compile('finished')])
+ tn.close()
+
+ def _report_coverage(self, req, body):
+ self._stop_coverage(req)
+ xml = False
+ html = False
+ path = None
+
+ body = body['report']
+ if 'file' in body.keys():
+ path = body['file']
+ if path != os.path.basename(path):
+ msg = _("Invalid path")
+ raise exc.HTTPBadRequest(explanation=msg)
+ path = os.path.join(self.data_path, path)
+ else:
+ msg = _("No path given for report file")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if 'xml' in body.keys():
+ xml = body['xml']
+ elif 'html' in body.keys():
+ if not self.combine:
+ msg = _("You can't use html reports without combining")
+ raise exc.HTTPBadRequest(explanation=msg)
+ html = body['html']
+
+ if self.combine:
+ self.coverInst.combine()
+ if xml:
+ self.coverInst.xml_report(outfile=path)
+ elif html:
+ if os.path.isdir(path):
+                msg = _("Directory conflict: %s already exists") % path
+ raise exc.HTTPBadRequest(explanation=msg)
+ self.coverInst.html_report(directory=path)
+ else:
+ output = open(path, 'w')
+ self.coverInst.report(file=output)
+ output.close()
+ for service in self.services:
+ service['telnet'].close()
+ else:
+ if xml:
+ apipath = path + '.api'
+ self.coverInst.xml_report(outfile=apipath)
+ for service in self.services:
+ self._report_coverage_telnet(service['telnet'],
+ path + '.%s'
+ % service['service'],
+ xml=True)
+ else:
+ output = open(path + '.api', 'w')
+ self.coverInst.report(file=output)
+ for service in self.services:
+ self._report_coverage_telnet(service['telnet'],
+ path + '.%s' % service['service'])
+ output.close()
+ return {'path': path}
+
+ def action(self, req, body):
+ _actions = {
+ 'start': self._start_coverage,
+ 'stop': self._stop_coverage,
+ 'report': self._report_coverage,
+ }
+ authorize(req.environ['nova.context'])
+ for action, data in body.iteritems():
+ if action == 'stop':
+ return _actions[action](req)
+ elif action == 'report' or action == 'start':
+ return _actions[action](req, body)
+ else:
+ msg = _("Coverage doesn't have %s action") % action
+ raise exc.HTTPBadRequest(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
+
+
+class Coverage_ext(extensions.ExtensionDescriptor):
+ """Enable Nova Coverage."""
+
+ name = "Coverage"
+ alias = "os-coverage"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "coverage/api/v2")
+ updated = "2012-10-15T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension('os-coverage',
+ controller=CoverageController(),
+ collection_actions={"action": "POST"})
+ resources.append(res)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/createserverext.py b/nova/api/openstack/compute/contrib/createserverext.py
index 116511fe5..db9838c98 100644
--- a/nova/api/openstack/compute/contrib/createserverext.py
+++ b/nova/api/openstack/compute/contrib/createserverext.py
@@ -18,7 +18,7 @@ from nova.api.openstack import extensions
class Createserverext(extensions.ExtensionDescriptor):
- """Extended support to the Create Server v1.1 API"""
+ """Extended support to the Create Server v1.1 API."""
name = "Createserverext"
alias = "os-create-server-ext"
diff --git a/nova/api/openstack/compute/contrib/deferred_delete.py b/nova/api/openstack/compute/contrib/deferred_delete.py
index ea7ac00f9..94bf63ab4 100644
--- a/nova/api/openstack/compute/contrib/deferred_delete.py
+++ b/nova/api/openstack/compute/contrib/deferred_delete.py
@@ -43,7 +43,8 @@ class DeferredDeleteController(wsgi.Controller):
try:
self.compute_api.restore(context, instance)
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error),
+ raise webob.exc.HTTPRequestEntityTooLarge(
+ explanation=unicode(error),
headers={'Retry-After': 0})
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
@@ -65,7 +66,7 @@ class DeferredDeleteController(wsgi.Controller):
class Deferred_delete(extensions.ExtensionDescriptor):
- """Instance deferred delete"""
+ """Instance deferred delete."""
name = "DeferredDelete"
alias = "os-deferred-delete"
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 903f930fc..4a8fe9ce5 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -170,7 +170,7 @@ class ServerDiskConfigController(wsgi.Controller):
class Disk_config(extensions.ExtensionDescriptor):
- """Disk Management Extension"""
+ """Disk Management Extension."""
name = "DiskConfig"
alias = ALIAS
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index 15f6456ea..51fd7f375 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -19,7 +19,6 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index f7ccdcbff..8a7c67f18 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -18,7 +18,6 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -62,7 +61,7 @@ class ExtendedStatusController(wsgi.Controller):
class Extended_status(extensions.ExtensionDescriptor):
- """Extended Status support"""
+ """Extended Status support."""
name = "ExtendedStatus"
alias = "OS-EXT-STS"
diff --git a/nova/api/openstack/compute/contrib/fixed_ips.py b/nova/api/openstack/compute/contrib/fixed_ips.py
new file mode 100644
index 000000000..e70416bac
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/fixed_ips.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova import db
+from nova import exception
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'fixed_ips')
+
+
+class FixedIPController(object):
+ def show(self, req, id):
+ """Return data about the given fixed ip."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ fixed_ip = db.fixed_ip_get_by_address_detailed(context, id)
+ except exception.FixedIpNotFoundForAddress as ex:
+ raise webob.exc.HTTPNotFound(explanation=str(ex))
+
+ fixed_ip_info = {"fixed_ip": {}}
+ if fixed_ip[1] is None:
+ msg = _("Fixed IP %s has been deleted") % id
+ raise webob.exc.HTTPNotFound(explanation=msg)
+
+ fixed_ip_info['fixed_ip']['cidr'] = fixed_ip[1]['cidr']
+ fixed_ip_info['fixed_ip']['address'] = fixed_ip[0]['address']
+
+ if fixed_ip[2]:
+ fixed_ip_info['fixed_ip']['hostname'] = fixed_ip[2]['hostname']
+ fixed_ip_info['fixed_ip']['host'] = fixed_ip[2]['host']
+ else:
+ fixed_ip_info['fixed_ip']['hostname'] = None
+ fixed_ip_info['fixed_ip']['host'] = None
+
+ return fixed_ip_info
+
+ def action(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'reserve' in body:
+ return self._set_reserved(context, id, True)
+ elif 'unreserve' in body:
+ return self._set_reserved(context, id, False)
+ else:
+ raise webob.exc.HTTPBadRequest(
+ explanation="No valid action specified")
+
+ def _set_reserved(self, context, address, reserved):
+ try:
+ fixed_ip = db.fixed_ip_get_by_address(context, address)
+ db.fixed_ip_update(context, fixed_ip['address'],
+ {'reserved': reserved})
+ except exception.FixedIpNotFoundForAddress:
+ msg = _("Fixed IP %s not found") % address
+ raise webob.exc.HTTPNotFound(explanation=msg)
+
+ return webob.exc.HTTPAccepted()
+
+
+class Fixed_ips(extensions.ExtensionDescriptor):
+ """Fixed IPs support."""
+
+ name = "FixedIPs"
+ alias = "os-fixed-ips"
+ namespace = "http://docs.openstack.org/compute/ext/fixed_ips/api/v2"
+ updated = "2012-10-18T13:25:27-06:00"
+
+ def __init__(self, ext_mgr):
+ ext_mgr.register(self)
+
+ def get_resources(self):
+ member_actions = {'action': 'POST'}
+ resources = []
+ resource = extensions.ResourceExtension('os-fixed-ips',
+ FixedIPController(),
+ member_actions=member_actions)
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index 6fd2fc460..1c5006576 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -202,7 +202,7 @@ class FlavorActionController(wsgi.Controller):
class Flavor_access(extensions.ExtensionDescriptor):
- """Flavor access supprt"""
+ """Flavor access support."""
name = "FlavorAccess"
alias = "os-flavor-access"
diff --git a/nova/api/openstack/compute/contrib/flavor_disabled.py b/nova/api/openstack/compute/contrib/flavor_disabled.py
index 48181954b..62f902409 100644
--- a/nova/api/openstack/compute/contrib/flavor_disabled.py
+++ b/nova/api/openstack/compute/contrib/flavor_disabled.py
@@ -53,7 +53,7 @@ class FlavorDisabledController(wsgi.Controller):
class Flavor_disabled(extensions.ExtensionDescriptor):
- """Support to show the disabled status of a flavor"""
+ """Support to show the disabled status of a flavor."""
name = "FlavorDisabled"
alias = "OS-FLV-DISABLED"
diff --git a/nova/api/openstack/compute/contrib/flavor_rxtx.py b/nova/api/openstack/compute/contrib/flavor_rxtx.py
index 6dce1bee1..84ccd0c60 100644
--- a/nova/api/openstack/compute/contrib/flavor_rxtx.py
+++ b/nova/api/openstack/compute/contrib/flavor_rxtx.py
@@ -53,7 +53,7 @@ class FlavorRxtxController(wsgi.Controller):
class Flavor_rxtx(extensions.ExtensionDescriptor):
- """Support to show the rxtx status of a flavor"""
+ """Support to show the rxtx status of a flavor."""
name = "FlavorRxtx"
alias = "os-flavor-rxtx"
diff --git a/nova/api/openstack/compute/contrib/flavor_swap.py b/nova/api/openstack/compute/contrib/flavor_swap.py
index 8e0277979..707b3e3fa 100644
--- a/nova/api/openstack/compute/contrib/flavor_swap.py
+++ b/nova/api/openstack/compute/contrib/flavor_swap.py
@@ -53,7 +53,7 @@ class FlavorSwapController(wsgi.Controller):
class Flavor_swap(extensions.ExtensionDescriptor):
- """Support to show the swap status of a flavor"""
+ """Support to show the swap status of a flavor."""
name = "FlavorSwap"
alias = "os-flavor-swap"
diff --git a/nova/api/openstack/compute/contrib/flavorextradata.py b/nova/api/openstack/compute/contrib/flavorextradata.py
index e2bdadf16..8f9640c98 100644
--- a/nova/api/openstack/compute/contrib/flavorextradata.py
+++ b/nova/api/openstack/compute/contrib/flavorextradata.py
@@ -63,7 +63,7 @@ class FlavorextradataController(wsgi.Controller):
class Flavorextradata(extensions.ExtensionDescriptor):
- """Provide additional data for flavors"""
+ """Provide additional data for flavors."""
name = "FlavorExtraData"
alias = "OS-FLV-EXT-DATA"
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index 77af25c9f..c8deb7b4c 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" The instance type extra specs extension"""
+"""The instance type extra specs extension."""
from webob import exc
@@ -35,7 +35,7 @@ class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
class FlavorExtraSpecsController(object):
- """ The flavor extra specs API controller for the OpenStack API """
+ """The flavor extra specs API controller for the OpenStack API."""
def _get_extra_specs(self, context, flavor_id):
extra_specs = db.instance_type_extra_specs_get(context, flavor_id)
@@ -51,7 +51,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def index(self, req, flavor_id):
- """ Returns the list of extra specs for a givenflavor """
+ """Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
authorize(context)
return self._get_extra_specs(context, flavor_id)
@@ -92,7 +92,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def show(self, req, flavor_id, id):
- """ Return a single extra spec item """
+ """Return a single extra spec item."""
context = req.environ['nova.context']
authorize(context)
specs = self._get_extra_specs(context, flavor_id)
@@ -102,14 +102,14 @@ class FlavorExtraSpecsController(object):
raise exc.HTTPNotFound()
def delete(self, req, flavor_id, id):
- """ Deletes an existing extra spec """
+ """Deletes an existing extra spec."""
context = req.environ['nova.context']
authorize(context)
db.instance_type_extra_specs_delete(context, flavor_id, id)
class Flavorextraspecs(extensions.ExtensionDescriptor):
- """Instance type (flavor) extra specs"""
+ """Instance type (flavor) extra specs."""
name = "FlavorExtraSpecs"
alias = "os-flavor-extra-specs"
diff --git a/nova/api/openstack/compute/contrib/floating_ip_dns.py b/nova/api/openstack/compute/contrib/floating_ip_dns.py
index 667bec294..fbea0acf9 100644
--- a/nova/api/openstack/compute/contrib/floating_ip_dns.py
+++ b/nova/api/openstack/compute/contrib/floating_ip_dns.py
@@ -129,7 +129,7 @@ def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
class FloatingIPDNSDomainController(object):
- """DNS domain controller for OpenStack API"""
+ """DNS domain controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
@@ -151,7 +151,7 @@ class FloatingIPDNSDomainController(object):
@wsgi.serializers(xml=DomainTemplate)
def update(self, req, id, body):
- """Add or modify domain entry"""
+ """Add or modify domain entry."""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
@@ -179,7 +179,7 @@ class FloatingIPDNSDomainController(object):
area_name: area})
def delete(self, req, id):
- """Delete the domain identified by id. """
+ """Delete the domain identified by id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
@@ -194,7 +194,7 @@ class FloatingIPDNSDomainController(object):
class FloatingIPDNSEntryController(object):
- """DNS Entry controller for OpenStack API"""
+ """DNS Entry controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
@@ -235,7 +235,7 @@ class FloatingIPDNSEntryController(object):
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def update(self, req, domain_id, id, body):
- """Add or modify dns entry"""
+ """Add or modify dns entry."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
@@ -263,7 +263,7 @@ class FloatingIPDNSEntryController(object):
'domain': domain})
def delete(self, req, domain_id, id):
- """Delete the entry identified by req and id. """
+ """Delete the entry identified by req and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
@@ -278,7 +278,7 @@ class FloatingIPDNSEntryController(object):
class Floating_ip_dns(extensions.ExtensionDescriptor):
- """Floating IP DNS support"""
+ """Floating IP DNS support."""
name = "FloatingIpDns"
alias = "os-floating-ip-dns"
diff --git a/nova/api/openstack/compute/contrib/floating_ip_pools.py b/nova/api/openstack/compute/contrib/floating_ip_pools.py
index abd422f0d..3361230dd 100644
--- a/nova/api/openstack/compute/contrib/floating_ip_pools.py
+++ b/nova/api/openstack/compute/contrib/floating_ip_pools.py
@@ -76,7 +76,7 @@ class FloatingIPPoolsController(object):
class Floating_ip_pools(extensions.ExtensionDescriptor):
- """Floating IPs support"""
+ """Floating IPs support."""
name = "FloatingIpPools"
alias = "os-floating-ip-pools"
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index c2e1cadce..3f00136f5 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -317,7 +317,7 @@ class FloatingIPActionController(wsgi.Controller):
class Floating_ips(extensions.ExtensionDescriptor):
- """Floating IPs support"""
+ """Floating IPs support."""
name = "FloatingIps"
alias = "os-floating-ips"
diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
new file mode 100644
index 000000000..f5b8d24dd
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
@@ -0,0 +1,173 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+
+CONF = cfg.CONF
+CONF.import_opt('default_floating_pool', 'nova.network.manager')
+CONF.import_opt('public_interface', 'nova.network.linux_net')
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'floating_ips_bulk')
+
+
+class FloatingIPBulkController(object):
+
+ def index(self, req):
+ """Return a list of all floating ips."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ return self._get_floating_ip_info(context)
+
+ def show(self, req, id):
+ """Return a list of all floating ips for a given host."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ return self._get_floating_ip_info(context, id)
+
+ def _get_floating_ip_info(self, context, host=None):
+ floating_ip_info = {"floating_ip_info": []}
+
+ try:
+ if host is None:
+ floating_ips = db.floating_ip_get_all(context)
+ else:
+ floating_ips = db.floating_ip_get_all_by_host(context, host)
+ except exception.NoFloatingIpsDefined:
+ return floating_ip_info
+
+ for floating_ip in floating_ips:
+ instance_uuid = None
+ if floating_ip['fixed_ip_id']:
+ fixed_ip = db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
+ instance_uuid = fixed_ip['instance_uuid']
+
+ result = {'address': floating_ip['address'],
+ 'pool': floating_ip['pool'],
+ 'interface': floating_ip['interface'],
+ 'project_id': floating_ip['project_id'],
+ 'instance_uuid': instance_uuid}
+ floating_ip_info['floating_ip_info'].append(result)
+
+ return floating_ip_info
+
+ def create(self, req, body):
+ """Bulk create floating ips."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if not 'floating_ips_bulk_create' in body:
+ raise webob.exc.HTTPUnprocessableEntity()
+ params = body['floating_ips_bulk_create']
+
+ LOG.debug(params)
+
+ if not 'ip_range' in params:
+ raise webob.exc.HTTPUnprocessableEntity()
+ ip_range = params['ip_range']
+
+ pool = params.get('pool', CONF.default_floating_pool)
+ interface = params.get('interface', CONF.public_interface)
+
+ try:
+ ips = ({'address': str(address),
+ 'pool': pool,
+ 'interface': interface}
+ for address in self._address_to_hosts(ip_range))
+ except exception.InvalidInput as exc:
+ raise webob.exc.HTTPBadRequest(explanation=str(exc))
+
+ try:
+ db.floating_ip_bulk_create(context, ips)
+ except exception.FloatingIpExists as exc:
+ raise webob.exc.HTTPBadRequest(explanation=str(exc))
+
+ return {"floating_ips_bulk_create": {"ip_range": ip_range,
+ "pool": pool,
+ "interface": interface}}
+
+ def update(self, req, id, body):
+ """Bulk delete floating IPs."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if id != "delete":
+ raise webob.exc.HTTPNotFound("Unknown action")
+
+ try:
+ ip_range = body['ip_range']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ try:
+ ips = ({'address': str(address)}
+ for address in self._address_to_hosts(ip_range))
+ except exception.InvalidInput as exc:
+ raise webob.exc.HTTPBadRequest(explanation=str(exc))
+ db.floating_ip_bulk_destroy(context, ips)
+
+ return {"floating_ips_bulk_delete": ip_range}
+
+ def _address_to_hosts(self, addresses):
+ """
+ Iterate over hosts within an address range.
+
+ If an explicit range specifier is missing, the parameter is
+ interpreted as a specific individual address.
+ """
+ try:
+ return [netaddr.IPAddress(addresses)]
+ except ValueError:
+ net = netaddr.IPNetwork(addresses)
+ if net.size < 4:
+ reason = _("/%s should be specified as single address(es) "
+ "not in cidr format") % net.prefixlen
+ raise exception.InvalidInput(reason=reason)
+ else:
+ return net.iter_hosts()
+ except netaddr.AddrFormatError as exc:
+ raise exception.InvalidInput(reason=str(exc))
+
+
+class Floating_ips_bulk(extensions.ExtensionDescriptor):
+ """Bulk handling of Floating IPs."""
+
+ name = "FloatingIpsBulk"
+ alias = "os-floating-ips-bulk"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "floating_ips_bulk/api/v2")
+ updated = "2012-10-29T13:25:27-06:00"
+
+ def __init__(self, ext_mgr):
+ ext_mgr.register(self)
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('os-floating-ips-bulk',
+ FloatingIPBulkController())
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/fping.py b/nova/api/openstack/compute/contrib/fping.py
new file mode 100644
index 000000000..282be19ca
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/fping.py
@@ -0,0 +1,160 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import os
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova import compute
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'fping')
+authorize_all_tenants = extensions.extension_authorizer(
+ 'compute', 'fping:all_tenants')
+fping_opts = [
+ cfg.StrOpt("fping_path",
+ default="/usr/sbin/fping",
+ help="Full path to fping."),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(fping_opts)
+
+
+class FpingController(object):
+
+ def __init__(self, network_api=None):
+ self.compute_api = compute.API()
+ self.last_call = {}
+
+ def check_fping(self):
+ if not os.access(CONF.fping_path, os.X_OK):
+ raise exc.HTTPServiceUnavailable(
+ explanation=_("fping utility is not found."))
+
+ @staticmethod
+ def fping(ips):
+ fping_ret = utils.execute(CONF.fping_path, *ips,
+ check_exit_code=False)
+ if not fping_ret:
+ return set()
+ alive_ips = set()
+ for line in fping_ret[0].split("\n"):
+ ip = line.split(" ", 1)[0]
+ if "alive" in line:
+ alive_ips.add(ip)
+ return alive_ips
+
+ @staticmethod
+ def _get_instance_ips(context, instance):
+ ret = []
+ for network in common.get_networks_for_instance(
+ context, instance).values():
+ all_ips = itertools.chain(network["ips"], network["floating_ips"])
+ ret += [ip["address"] for ip in all_ips]
+ return ret
+
+ def index(self, req):
+ context = req.environ["nova.context"]
+ search_opts = dict(deleted=False)
+ if "all_tenants" in req.GET:
+ authorize_all_tenants(context)
+ else:
+ authorize(context)
+ if context.project_id:
+ search_opts["project_id"] = context.project_id
+ else:
+ search_opts["user_id"] = context.user_id
+ self.check_fping()
+ include = req.GET.get("include", None)
+ if include:
+ include = set(include.split(","))
+ exclude = set()
+ else:
+ include = None
+ exclude = req.GET.get("exclude", None)
+ if exclude:
+ exclude = set(exclude.split(","))
+ else:
+ exclude = set()
+
+ instance_list = self.compute_api.get_all(
+ context, search_opts=search_opts)
+ ip_list = []
+ instance_ips = {}
+ instance_projects = {}
+
+ for instance in instance_list:
+ uuid = instance["uuid"]
+ if uuid in exclude or (include is not None and
+ uuid not in include):
+ continue
+ ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
+ instance_ips[uuid] = ips
+ instance_projects[uuid] = instance["project_id"]
+ ip_list += ips
+ alive_ips = self.fping(ip_list)
+ res = []
+ for instance_uuid, ips in instance_ips.iteritems():
+ res.append({
+ "id": instance_uuid,
+ "project_id": instance_projects[instance_uuid],
+ "alive": bool(set(ips) & alive_ips),
+ })
+ return {"servers": res}
+
+ def show(self, req, id):
+ try:
+ context = req.environ["nova.context"]
+ authorize(context)
+ self.check_fping()
+ instance = self.compute_api.get(context, id)
+ ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
+ alive_ips = self.fping(ips)
+ return {
+ "server": {
+ "id": instance["uuid"],
+ "project_id": instance["project_id"],
+ "alive": bool(set(ips) & alive_ips),
+ }
+ }
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+
+class Fping(extensions.ExtensionDescriptor):
+ """Fping Management Extension."""
+
+ name = "Fping"
+ alias = "os-fping"
+ namespace = "http://docs.openstack.org/compute/ext/fping/api/v1.1"
+ updated = "2012-07-06T00:00:00+00:00"
+
+ def get_resources(self):
+ res = extensions.ResourceExtension(
+ "os-fping",
+ FpingController())
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/hide_server_addresses.py b/nova/api/openstack/compute/contrib/hide_server_addresses.py
new file mode 100644
index 000000000..bb8ee553a
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/hide_server_addresses.py
@@ -0,0 +1,89 @@
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extension for hiding server addresses in certain states."""
+
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.compute import vm_states
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+
+opts = [
+ cfg.ListOpt('osapi_hide_server_address_states',
+ default=[vm_states.BUILDING],
+ help='List of instance states that should hide network info'),
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+LOG = logging.getLogger(__name__)
+
+authorize = extensions.soft_extension_authorizer('compute',
+ 'hide_server_addresses')
+
+
+class Controller(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(Controller, self).__init__(*args, **kwargs)
+ hidden_states = CONF.osapi_hide_server_address_states
+
+ # NOTE(jkoelker) _ is not considered uppercase ;)
+ valid_vm_states = [getattr(vm_states, state)
+ for state in dir(vm_states)
+ if state.isupper()]
+ self.hide_address_states = [state.lower()
+ for state in hidden_states
+ if state in valid_vm_states]
+
+ def _perhaps_hide_addresses(self, instance, resp_server):
+ if instance.get('vm_state') in self.hide_address_states:
+ resp_server['addresses'] = {}
+
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ resp = resp_obj
+ if not authorize(req.environ['nova.context']):
+ return
+
+ if 'server' in resp.obj and 'addresses' in resp.obj['server']:
+ instance = req.get_db_instance(id)
+ self._perhaps_hide_addresses(instance, resp.obj['server'])
+
+ @wsgi.extends
+ def detail(self, req, resp_obj):
+ resp = resp_obj
+ if not authorize(req.environ['nova.context']):
+ return
+
+ for server in list(resp.obj['servers']):
+ if 'addresses' in server:
+ instance = req.get_db_instance(server['id'])
+ self._perhaps_hide_addresses(instance, server)
+
+
+class Hide_server_addresses(extensions.ExtensionDescriptor):
+ """Support hiding server addresses in certain states."""
+
+ name = 'HideServerAddresses'
+ alias = 'os-hide-server-addresses'
+ namespace = ('http://docs.openstack.org/compute/ext/'
+ 'hide_server_addresses/api/v1.1')
+ updated = '2012-12-11T00:00:00+00:00'
+
+ def get_controller_extensions(self):
+ return [extensions.ControllerExtension(self, 'servers', Controller())]
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 237872405..d1b39d6db 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -22,10 +22,8 @@ from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.compute import api as compute_api
-from nova import db
+from nova import compute
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -34,11 +32,7 @@ authorize = extensions.extension_authorizer('compute', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
- def shimmer(obj, do_raise=False):
- # A bare list is passed in; we need to wrap it in a dict
- return dict(hosts=obj)
-
- root = xmlutil.TemplateElement('hosts', selector=shimmer)
+ root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('host_name')
elem.set('service')
@@ -75,7 +69,7 @@ class HostShowTemplate(xmlutil.TemplateBuilder):
return xmlutil.MasterTemplate(root, 1)
-class HostDeserializer(wsgi.XMLDeserializer):
+class HostUpdateDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = minidom.parseString(string)
@@ -84,144 +78,232 @@ class HostDeserializer(wsgi.XMLDeserializer):
raise exception.MalformedRequestBody(reason=msg)
updates = {}
- for child in node.childNodes[0].childNodes:
- updates[child.tagName] = self.extract_text(child)
-
- return dict(body=updates)
+ updates_node = self.find_first_child_named(node, 'updates')
+ if updates_node is not None:
+ maintenance = self.find_first_child_named(updates_node,
+ 'maintenance_mode')
+ if maintenance is not None:
+ updates[maintenance.tagName] = self.extract_text(maintenance)
+ status = self.find_first_child_named(updates_node, 'status')
+ if status is not None:
+ updates[status.tagName] = self.extract_text(status)
-def _list_hosts(req, service=None):
- """Returns a summary list of hosts, optionally filtering
- by service type.
- """
- context = req.environ['nova.context']
- services = db.service_get_all(context, False)
- zone = ''
- if 'zone' in req.GET:
- zone = req.GET['zone']
- if zone:
- services = [s for s in services if s['availability_zone'] == zone]
- hosts = []
- for host in services:
- hosts.append({"host_name": host['host'], 'service': host['topic'],
- 'zone': host['availability_zone']})
- if service:
- hosts = [host for host in hosts
- if host["service"] == service]
- return hosts
-
-
-def check_host(fn):
- """Makes sure that the host exists."""
- def wrapped(self, req, id, service=None, *args, **kwargs):
- listed_hosts = _list_hosts(req, service)
- hosts = [h["host_name"] for h in listed_hosts]
- if id in hosts:
- return fn(self, req, id, *args, **kwargs)
- else:
- message = _("Host '%s' could not be found.") % id
- raise webob.exc.HTTPNotFound(explanation=message)
- return wrapped
+ return dict(body=updates)
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
- self.api = compute_api.HostAPI()
+ self.api = compute.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
- authorize(req.environ['nova.context'])
- return {'hosts': _list_hosts(req)}
+ """
+ :returns: A dict in the format:
+
+ {'hosts': [{'host_name': 'some.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.other.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.celly.host.name',
+ 'service': 'cells'},
+ {'host_name': 'console1.host.com',
+ 'service': 'consoleauth'},
+ {'host_name': 'network1.host.com',
+ 'service': 'network'},
+ {'host_name': 'network2.host.com',
+ 'service': 'network'},
+ {'host_name': 'sched1.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'sched2.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'vol1.host.com',
+ 'service': 'volume'}]}
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ filters = {}
+ zone = req.GET.get('zone', None)
+ if zone:
+ filters['availability_zone'] = zone
+ services = self.api.service_get_all(context, filters=filters)
+ hosts = []
+ for service in services:
+ hosts.append({'host_name': service['host'],
+ 'service': service['topic'],
+ 'zone': service['availability_zone']})
+ return {'hosts': hosts}
@wsgi.serializers(xml=HostUpdateTemplate)
- @wsgi.deserializers(xml=HostDeserializer)
- @check_host
+ @wsgi.deserializers(xml=HostUpdateDeserializer)
def update(self, req, id, body):
- authorize(req.environ['nova.context'])
- update_values = {}
- for raw_key, raw_val in body.iteritems():
- key = raw_key.lower().strip()
- val = raw_val.lower().strip()
- if key == "status":
- if val in ("enable", "disable"):
- update_values['status'] = val.startswith("enable")
- else:
- explanation = _("Invalid status: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- elif key == "maintenance_mode":
- if val not in ['enable', 'disable']:
- explanation = _("Invalid mode: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- update_values['maintenance_mode'] = val == 'enable'
+ """
+ :param body: example format {'status': 'enable',
+ 'maintenance_mode': 'enable'}
+        :returns: A dict with 'host' and the updated 'status' and/or 'maintenance_mode' values
+ """
+ def read_enabled(orig_val, msg):
+ """
+ :param orig_val: A string with either 'enable' or 'disable'. May
+ be surrounded by whitespace, and case doesn't
+ matter
+ :param msg: The message to be passed to HTTPBadRequest. A single
+ %s will be replaced with orig_val.
+            :returns: True for 'enable' and False for 'disable'
+ """
+ val = orig_val.strip().lower()
+ if val == "enable":
+ return True
+ elif val == "disable":
+ return False
else:
- explanation = _("Invalid update setting: '%s'") % raw_key
- raise webob.exc.HTTPBadRequest(explanation=explanation)
-
- # this is for handling multiple settings at the same time:
- # the result dictionaries are merged in the first one.
- # Note: the 'host' key will always be the same so it's
- # okay that it gets overwritten.
- update_setters = {'status': self._set_enabled_status,
- 'maintenance_mode': self._set_host_maintenance}
- result = {}
- for key, value in update_values.iteritems():
- result.update(update_setters[key](req, id, value))
+ raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
+ context = req.environ['nova.context']
+ authorize(context)
+ # See what the user wants to 'update'
+ params = dict([(k.strip().lower(), v) for k, v in body.iteritems()])
+ orig_status = status = params.pop('status', None)
+ orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
+ # Validate the request
+ if len(params) > 0:
+ # Some extra param was passed. Fail.
+ explanation = _("Invalid update setting: '%s'") % params.keys()[0]
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ if orig_status is not None:
+ status = read_enabled(orig_status, _("Invalid status: '%s'"))
+ if orig_maint_mode is not None:
+ maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
+ if status is None and maint_mode is None:
+ explanation = _("'status' or 'maintenance_mode' needed for "
+ "host update")
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ # Make the calls and merge the results
+ result = {'host': id}
+ if status is not None:
+ result['status'] = self._set_enabled_status(context, id, status)
+ if maint_mode is not None:
+ result['maintenance_mode'] = self._set_host_maintenance(context,
+ id, maint_mode)
return result
- def _set_host_maintenance(self, req, host, mode=True):
+ def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
- context = req.environ['nova.context']
- LOG.audit(_("Putting host %(host)s in maintenance "
+ LOG.audit(_("Putting host %(host_name)s in maintenance "
"mode %(mode)s.") % locals())
- result = self.api.set_host_maintenance(context, host, mode)
+ try:
+ result = self.api.set_host_maintenance(context, host_name, mode)
+ except NotImplementedError:
+ msg = _("Virt driver does not implement host maintenance mode.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
- return {"host": host, "maintenance_mode": result}
+ return result
- def _set_enabled_status(self, req, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- context = req.environ['nova.context']
- state = "enabled" if enabled else "disabled"
- LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
- result = self.api.set_host_enabled(context, host=host,
- enabled=enabled)
+ def _set_enabled_status(self, context, host_name, enabled):
+ """Sets the specified host's ability to accept new instances.
+ :param enabled: a boolean - if False no new VMs will be able to start
+ on the host"""
+ if enabled:
+ LOG.audit(_("Enabling host %s.") % host_name)
+ else:
+ LOG.audit(_("Disabling host %s.") % host_name)
+ try:
+ result = self.api.set_host_enabled(context, host_name=host_name,
+ enabled=enabled)
+ except NotImplementedError:
+ msg = _("Virt driver does not implement host disabled status.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
if result not in ("enabled", "disabled"):
- # An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
- return {"host": host, "status": result}
+ return result
- def _host_power_action(self, req, host, action):
+ def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
- result = self.api.host_power_action(context, host=host,
+ result = self.api.host_power_action(context, host_name=host_name,
action=action)
- except NotImplementedError as e:
- raise webob.exc.HTTPBadRequest(explanation=e.msg)
- return {"host": host, "power_action": result}
+ except NotImplementedError:
+ msg = _("Virt driver does not implement host power management.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ return {"host": host_name, "power_action": result}
@wsgi.serializers(xml=HostActionTemplate)
def startup(self, req, id):
- return self._host_power_action(req, host=id, action="startup")
+ return self._host_power_action(req, host_name=id, action="startup")
@wsgi.serializers(xml=HostActionTemplate)
def shutdown(self, req, id):
- return self._host_power_action(req, host=id, action="shutdown")
+ return self._host_power_action(req, host_name=id, action="shutdown")
@wsgi.serializers(xml=HostActionTemplate)
def reboot(self, req, id):
- return self._host_power_action(req, host=id, action="reboot")
+ return self._host_power_action(req, host_name=id, action="reboot")
+
+ @staticmethod
+ def _get_total_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(total)',
+ 'cpu': compute_node['vcpus'],
+ 'memory_mb': compute_node['memory_mb'],
+ 'disk_gb': compute_node['local_gb']}}
+
+ @staticmethod
+ def _get_used_now_resources(host_name, compute_node):
+ return {'resource': {'host': host_name,
+ 'project': '(used_now)',
+ 'cpu': compute_node['vcpus_used'],
+ 'memory_mb': compute_node['memory_mb_used'],
+ 'disk_gb': compute_node['local_gb_used']}}
+
+ @staticmethod
+ def _get_resource_totals_from_instances(host_name, instances):
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
+ for instance in instances:
+ cpu_sum += instance['vcpus']
+ mem_sum += instance['memory_mb']
+ hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
+
+ return {'resource': {'host': host_name,
+ 'project': '(used_max)',
+ 'cpu': cpu_sum,
+ 'memory_mb': mem_sum,
+ 'disk_gb': hdd_sum}}
+
+ @staticmethod
+ def _get_resources_by_project(host_name, instances):
+ # Getting usage resource per project
+ project_map = {}
+ for instance in instances:
+ resource = project_map.setdefault(instance['project_id'],
+ {'host': host_name,
+ 'project': instance['project_id'],
+ 'cpu': 0,
+ 'memory_mb': 0,
+ 'disk_gb': 0})
+ resource['cpu'] += instance['vcpus']
+ resource['memory_mb'] += instance['memory_mb']
+ resource['disk_gb'] += (instance['root_gb'] +
+ instance['ephemeral_gb'])
+ return project_map
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
- :param context: security context
- :param host: hostname
+ :param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
@@ -229,70 +311,31 @@ class HostController(object):
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
- host = id
context = req.environ['nova.context']
- if not context.is_admin:
+ host_name = id
+ try:
+ service = self.api.service_get_by_compute_host(context, host_name)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ except exception.AdminRequired:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
-
- # Getting compute node info and related instances info
- try:
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
- except exception.ComputeHostNotFound:
- raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
- instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
-
- # Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
- resources = [{'resource': {'host': host, 'project': '(total)',
- 'cpu': compute_ref['vcpus'],
- 'memory_mb': compute_ref['memory_mb'],
- 'disk_gb': compute_ref['local_gb']}},
- {'resource': {'host': host, 'project': '(used_now)',
- 'cpu': compute_ref['vcpus_used'],
- 'memory_mb': compute_ref['memory_mb_used'],
- 'disk_gb': compute_ref['local_gb_used']}}]
-
- cpu_sum = 0
- mem_sum = 0
- hdd_sum = 0
- for i in instance_refs:
- cpu_sum += i['vcpus']
- mem_sum += i['memory_mb']
- hdd_sum += i['root_gb'] + i['ephemeral_gb']
-
- resources.append({'resource': {'host': host,
- 'project': '(used_max)',
- 'cpu': cpu_sum,
- 'memory_mb': mem_sum,
- 'disk_gb': hdd_sum}})
-
- # Getting usage resource per project
- project_ids = [i['project_id'] for i in instance_refs]
- project_ids = list(set(project_ids))
- for project_id in project_ids:
- vcpus = [i['vcpus'] for i in instance_refs
- if i['project_id'] == project_id]
-
- mem = [i['memory_mb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- resources.append({'resource': {'host': host,
- 'project': project_id,
- 'cpu': reduce(lambda x, y: x + y, vcpus),
- 'memory_mb': reduce(lambda x, y: x + y, mem),
- 'disk_gb': reduce(lambda x, y: x + y, disk)}})
-
+ compute_node = service['compute_node'][0]
+ instances = self.api.instance_get_all_by_host(context, host_name)
+ resources = [self._get_total_resources(host_name, compute_node)]
+ resources.append(self._get_used_now_resources(host_name,
+ compute_node))
+ resources.append(self._get_resource_totals_from_instances(host_name,
+ instances))
+ by_proj_resources = self._get_resources_by_project(host_name,
+ instances)
+ for resource in by_proj_resources.itervalues():
+ resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
- """Admin-only host administration"""
+ """Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
diff --git a/nova/api/openstack/compute/contrib/hypervisors.py b/nova/api/openstack/compute/contrib/hypervisors.py
index 2760e6c19..6580212a9 100644
--- a/nova/api/openstack/compute/contrib/hypervisors.py
+++ b/nova/api/openstack/compute/contrib/hypervisors.py
@@ -241,7 +241,7 @@ class HypervisorsController(object):
class Hypervisors(extensions.ExtensionDescriptor):
- """Admin-only hypervisor administration"""
+ """Admin-only hypervisor administration."""
name = "Hypervisors"
alias = "os-hypervisors"
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 7c98cb8d6..4b0afdecf 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -21,12 +21,12 @@ import datetime
import webob.exc
from nova.api.openstack import extensions
-from nova import config
from nova import db
-from nova import flags
+from nova.openstack.common import cfg
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
authorize = extensions.extension_authorizer('compute',
@@ -122,7 +122,7 @@ class InstanceUsageAuditLogController(object):
class Instance_usage_audit_log(extensions.ExtensionDescriptor):
- """Admin-only Task Log Monitoring"""
+ """Admin-only Task Log Monitoring."""
name = "OSInstanceUsageAuditLog"
alias = "os-instance_usage_audit_log"
namespace = "http://docs.openstack.org/ext/services/api/v1.1"
diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py
index e5e1e37fd..ef93fb048 100644
--- a/nova/api/openstack/compute/contrib/keypairs.py
+++ b/nova/api/openstack/compute/contrib/keypairs.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" Keypair management extension"""
+"""Keypair management extension."""
import webob
import webob.exc
@@ -49,7 +49,7 @@ class KeypairsTemplate(xmlutil.TemplateBuilder):
class KeypairController(object):
- """ Keypair API controller for the OpenStack API """
+ """Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
@@ -187,7 +187,7 @@ class Controller(servers.Controller):
class Keypairs(extensions.ExtensionDescriptor):
- """Keypair Support"""
+ """Keypair Support."""
name = "Keypairs"
alias = "os-keypairs"
diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py
index 29409c050..7017422a1 100644
--- a/nova/api/openstack/compute/contrib/multinic.py
+++ b/nova/api/openstack/compute/contrib/multinic.py
@@ -84,7 +84,7 @@ class MultinicController(wsgi.Controller):
# Note: The class name is as it has to be for this to be loaded as an
# extension--only first character capitalized.
class Multinic(extensions.ExtensionDescriptor):
- """Multiple network support"""
+ """Multiple network support."""
name = "Multinic"
alias = "NMN"
diff --git a/nova/api/openstack/compute/contrib/multiple_create.py b/nova/api/openstack/compute/contrib/multiple_create.py
index 9b3ca8a57..cbad2d439 100644
--- a/nova/api/openstack/compute/contrib/multiple_create.py
+++ b/nova/api/openstack/compute/contrib/multiple_create.py
@@ -18,7 +18,7 @@ from nova.api.openstack import extensions
class Multiple_create(extensions.ExtensionDescriptor):
- """Allow multiple create in the Create Server v1.1 API"""
+ """Allow multiple create in the Create Server v1.1 API."""
name = "MultipleCreate"
alias = "os-multiple-create"
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
new file mode 100644
index 000000000..3cdda1d76
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -0,0 +1,67 @@
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import network
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'networks_associate')
+
+
+class NetworkAssociateActionController(wsgi.Controller):
+ """Network Association API Controller."""
+
+ def __init__(self, network_api=None):
+ self.network_api = network_api or network.API()
+
+ @wsgi.action("disassociate_host")
+ def _disassociate_host_only(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating host with network with id %s"), id)
+ try:
+ self.network_api.associate(context, id, host=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ @wsgi.action("disassociate_project")
+ def _disassociate_project_only(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating project with network with id %s"), id)
+ try:
+ self.network_api.associate(context, id, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ @wsgi.action("associate_host")
+ def _associate_host(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ self.network_api.associate(context, id,
+ host=body['associate_host'])
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+
+class Networks_associate(extensions.ExtensionDescriptor):
+ """Network association support."""
+
+ name = "NetworkAssociationSupport"
+ alias = "os-networks-associate"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "networks_associate/api/v2")
+ updated = "2012-11-19T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ extension = extensions.ControllerExtension(
+ self, 'os-networks', NetworkAssociateActionController())
+
+ return [extension]
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index 4537e1ec7..d1d172686 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -21,14 +21,15 @@ import webob
from webob import exc
from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
from nova import exception
-from nova import flags
from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'networks')
-authorize_view = extensions.extension_authorizer('compute', 'networks:view')
+authorize_view = extensions.extension_authorizer('compute',
+ 'networks:view')
def network_dict(context, network):
@@ -53,35 +54,11 @@ def network_dict(context, network):
return {}
-class NetworkController(object):
+class NetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = network_api or network.API()
- def action(self, req, id, body):
- _actions = {
- 'disassociate': self._disassociate,
- }
-
- for action, data in body.iteritems():
- try:
- return _actions[action](req, id, body)
- except KeyError:
- msg = _("Network does not have %s action") % action
- raise exc.HTTPBadRequest(explanation=msg)
-
- raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
-
- def _disassociate(self, request, network_id, body):
- context = request.environ['nova.context']
- authorize(context)
- LOG.debug(_("Disassociating network with id %s"), network_id)
- try:
- self.network_api.disassociate(context, network_id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
def index(self, req):
context = req.environ['nova.context']
authorize_view(context)
@@ -89,6 +66,18 @@ class NetworkController(object):
result = [network_dict(context, net_ref) for net_ref in networks]
return {'networks': result}
+ @wsgi.action("disassociate")
+ def _disassociate_host_and_project(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Disassociating network with id %s"), id)
+
+ try:
+ self.network_api.associate(context, id, host=None, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
def show(self, req, id):
context = req.environ['nova.context']
authorize_view(context)
@@ -161,12 +150,13 @@ class NetworkController(object):
return webob.Response(status_int=202)
-class Networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension"""
+class Os_networks(extensions.ExtensionDescriptor):
+ """Admin-only Network Management Extension."""
name = "Networks"
alias = "os-networks"
- namespace = "http://docs.openstack.org/compute/ext/networks/api/v1.1"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-networks/api/v1.1")
updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
new file mode 100644
index 000000000..03178ab65
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -0,0 +1,214 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import netaddr
+import netaddr.core as netexc
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import context as nova_context
+from nova import exception
+import nova.network
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import quota
+
+
+CONF = cfg.CONF
+
+try:
+ os_network_opts = [
+ cfg.BoolOpt("enable_network_quota",
+ default=False,
+                    help="Enables or disables quota checking for tenant networks"),
+ cfg.StrOpt('use_quantum_default_nets',
+ default="False",
+ help=('Control for checking for default networks')),
+ cfg.StrOpt('quantum_default_tenant_id',
+ default="default",
+ help=('Default tenant id when creating quantum '
+ 'networks'))
+ ]
+ CONF.register_opts(os_network_opts)
+except cfg.DuplicateOptError:
+ # NOTE(jkoelker) These options are verbatim elsewhere this is here
+ # to make sure they are registered for our use.
+ pass
+
+if CONF.enable_network_quota:
+ opts = [
+ cfg.IntOpt('quota_networks',
+ default=3,
+ help='number of private networks allowed per project'),
+ ]
+ CONF.register_opts(opts)
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
+
+
+def network_dict(network):
+ return {"id": network.get("uuid") or network["id"],
+ "cidr": network["cidr"],
+ "label": network["label"]}
+
+
+class NetworkController(object):
+ def __init__(self, network_api=None):
+ self.network_api = nova.network.API()
+ self._default_networks = []
+
+ def _refresh_default_networks(self):
+ self._default_networks = []
+ if CONF.use_quantum_default_nets == "True":
+ try:
+ self._default_networks = self._get_default_networks()
+ except Exception:
+ LOG.exception("Failed to get default networks")
+
+ def _get_default_networks(self):
+ project_id = CONF.quantum_default_tenant_id
+ ctx = nova_context.RequestContext(user_id=None,
+ project_id=project_id)
+ networks = {}
+ for n in self.network_api.get_all(ctx):
+ networks[n['id']] = n['label']
+ return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ networks = self.network_api.get_all(context)
+ if not self._default_networks:
+ self._refresh_default_networks()
+ networks.extend(self._default_networks)
+ return {'networks': [network_dict(n) for n in networks]}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Showing network with id %s") % id)
+ try:
+ network = self.network_api.get(context, id)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return network_dict(network)
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=-1)
+ except Exception:
+ reservation = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "network."))
+
+ LOG.info(_("Deleting network with id %s") % id)
+
+ try:
+ self.network_api.delete(context, id)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = exc.HTTPAccepted()
+ except exception.NetworkNotFound:
+ response = exc.HTTPNotFound(_("Network not found"))
+
+ return response
+
+ def create(self, req, body):
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ network = body["network"]
+ keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
+ "num_networks"]
+ kwargs = dict((k, network.get(k)) for k in keys)
+
+ label = network["label"]
+
+ if not (kwargs["cidr"] or kwargs["cidr_v6"]):
+ msg = _("No CIDR requested")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if kwargs["cidr"]:
+ try:
+ net = netaddr.IPNetwork(kwargs["cidr"])
+ if net.size < 4:
+ msg = _("Requested network does not contain "
+ "enough (2+) usable hosts")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrFormatError:
+ msg = _("CIDR is malformed.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrConversionError:
+ msg = _("Address could not be converted.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ networks = []
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=1)
+ except exception.OverQuota:
+ msg = _("Quota exceeded, too many networks.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ networks = self.network_api.create(context,
+ label=label, **kwargs)
+ if CONF.enable_network_quota:
+ QUOTAS.commit(context, reservation)
+ except Exception:
+ if CONF.enable_network_quota:
+ QUOTAS.rollback(context, reservation)
+ msg = _("Create networks failed")
+ LOG.exception(msg, extra=network)
+ raise exc.HTTPServiceUnavailable(explanation=msg)
+ return {"network": network_dict(networks[0])}
+
+
+class Os_tenant_networks(extensions.ExtensionDescriptor):
+ """Tenant-based Network Management Extension."""
+
+ name = "OSTenantNetworks"
+ alias = "os-tenant-networks"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-tenant-networks/api/v2")
+ updated = "2012-03-07T09:46:43-05:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-tenant-networks',
+ NetworkController())
+ return [ext]
+
+
+def _sync_networks(context, project_id, session):
+ ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
+ ctx = ctx.elevated()
+ networks = nova.network.api.API().get_all(ctx)
+ return dict(networks=len(networks))
+
+
+if CONF.enable_network_quota:
+ QUOTAS.register_resource(quota.ReservableResource('networks',
+ _sync_networks,
+ 'quota_networks'))
diff --git a/nova/api/openstack/compute/contrib/quota_classes.py b/nova/api/openstack/compute/contrib/quota_classes.py
index 437b58005..f3f5b9b08 100644
--- a/nova/api/openstack/compute/contrib/quota_classes.py
+++ b/nova/api/openstack/compute/contrib/quota_classes.py
@@ -45,7 +45,7 @@ class QuotaClassTemplate(xmlutil.TemplateBuilder):
class QuotaClassSetsController(object):
def _format_quota_set(self, quota_class, quota_set):
- """Convert the quota object to a result dict"""
+ """Convert the quota object to a result dict."""
result = dict(id=str(quota_class))
@@ -84,7 +84,7 @@ class QuotaClassSetsController(object):
class Quota_classes(extensions.ExtensionDescriptor):
- """Quota classes management support"""
+ """Quota classes management support."""
name = "QuotaClasses"
alias = "os-quota-class-sets"
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index 33584badc..bdf82ea86 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -48,7 +48,7 @@ class QuotaTemplate(xmlutil.TemplateBuilder):
class QuotaSetsController(object):
def _format_quota_set(self, project_id, quota_set):
- """Convert the quota object to a result dict"""
+ """Convert the quota object to a result dict."""
result = dict(id=str(project_id))
@@ -106,7 +106,7 @@ class QuotaSetsController(object):
class Quotas(extensions.ExtensionDescriptor):
- """Quotas management support"""
+ """Quotas management support."""
name = "Quotas"
alias = "os-quota-sets"
diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py
index 054eaf870..29135418e 100644
--- a/nova/api/openstack/compute/contrib/rescue.py
+++ b/nova/api/openstack/compute/contrib/rescue.py
@@ -21,14 +21,13 @@ from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = exts.extension_authorizer('compute', 'rescue')
@@ -55,7 +54,7 @@ class RescueController(wsgi.Controller):
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
- password = utils.generate_password(CONF.password_length)
+ password = utils.generate_password()
instance = self._get_instance(context, id)
try:
@@ -82,7 +81,7 @@ class RescueController(wsgi.Controller):
class Rescue(exts.ExtensionDescriptor):
- """Instance rescue mode"""
+ """Instance rescue mode."""
name = "Rescue"
alias = "os-rescue"
diff --git a/nova/api/openstack/compute/contrib/scheduler_hints.py b/nova/api/openstack/compute/contrib/scheduler_hints.py
index f5c10fea1..5e54fbd37 100644
--- a/nova/api/openstack/compute/contrib/scheduler_hints.py
+++ b/nova/api/openstack/compute/contrib/scheduler_hints.py
@@ -53,7 +53,7 @@ class SchedulerHintsController(wsgi.Controller):
class Scheduler_hints(extensions.ExtensionDescriptor):
- """Pass arbitrary key/value pairs to the scheduler"""
+ """Pass arbitrary key/value pairs to the scheduler."""
name = "SchedulerHints"
alias = "OS-SCH-HNT"
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index b86397694..a15c395ae 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -29,7 +29,6 @@ from nova import compute
from nova.compute import api as compute_api
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -108,7 +107,7 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
- """Deserialize an xml-formatted security group create request"""
+ """Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
@@ -129,13 +128,13 @@ class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
def default(self, string):
- """Deserialize an xml-formatted security group create request"""
+ """Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
- """Marshal the security group rule attribute of a parsed request"""
+ """Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
@@ -182,30 +181,30 @@ class SecurityGroupControllerBase(object):
def _format_security_group_rule(self, context, rule):
sg_rule = {}
- sg_rule['id'] = rule.id
- sg_rule['parent_group_id'] = rule.parent_group_id
- sg_rule['ip_protocol'] = rule.protocol
- sg_rule['from_port'] = rule.from_port
- sg_rule['to_port'] = rule.to_port
+ sg_rule['id'] = rule['id']
+ sg_rule['parent_group_id'] = rule['parent_group_id']
+ sg_rule['ip_protocol'] = rule['protocol']
+ sg_rule['from_port'] = rule['from_port']
+ sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
- if rule.group_id:
+ if rule['group_id']:
source_group = self.security_group_api.get(context,
- id=rule.group_id)
+ id=rule['group_id'])
sg_rule['group'] = {'name': source_group.name,
'tenant_id': source_group.project_id}
else:
- sg_rule['ip_range'] = {'cidr': rule.cidr}
+ sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
- security_group['id'] = group.id
- security_group['description'] = group.description
- security_group['name'] = group.name
- security_group['tenant_id'] = group.project_id
+ security_group['id'] = group['id']
+ security_group['description'] = group['description']
+ security_group['name'] = group['name']
+ security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
- for rule in group.rules:
+ for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
@@ -262,7 +261,7 @@ class SecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
- """Returns a list of security groups"""
+ """Returns a list of security groups."""
context = self._authorize_context(req)
search_opts = {}
@@ -523,7 +522,7 @@ class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
class Security_groups(extensions.ExtensionDescriptor):
- """Security group support"""
+ """Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py
index 2bd020e43..1be4b664c 100644
--- a/nova/api/openstack/compute/contrib/server_diagnostics.py
+++ b/nova/api/openstack/compute/contrib/server_diagnostics.py
@@ -50,7 +50,7 @@ class ServerDiagnosticsController(object):
class Server_diagnostics(extensions.ExtensionDescriptor):
- """Allow Admins to view server diagnostics through server action"""
+ """Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
diff --git a/nova/api/openstack/compute/contrib/server_password.py b/nova/api/openstack/compute/contrib/server_password.py
new file mode 100644
index 000000000..0fd620fb8
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/server_password.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The server password extension."""
+
+import webob
+
+from nova.api.metadata import password
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import exception
+
+
+authorize = extensions.extension_authorizer('compute', 'server_password')
+
+
+class ServerPasswordTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('password', selector='password')
+ root.text = unicode
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServerPasswordController(object):
+ """The server password API controller for the OpenStack API."""
+ def __init__(self):
+ self.compute_api = compute.API()
+
+ def _get_instance(self, context, server_id):
+ try:
+ return self.compute_api.get(context, server_id)
+ except exception.InstanceNotFound as exp:
+ raise webob.exc.HTTPNotFound(explanation=unicode(exp))
+
+ @wsgi.serializers(xml=ServerPasswordTemplate)
+ def index(self, req, server_id):
+ context = req.environ['nova.context']
+ authorize(context)
+ instance = self._get_instance(context, server_id)
+
+ passw = password.extract_password(instance)
+ return {'password': passw or ''}
+
+ @wsgi.response(204)
+ def delete(self, req, server_id):
+ context = req.environ['nova.context']
+ authorize(context)
+ instance = self._get_instance(context, server_id)
+ password.set_password(context, instance['uuid'], None)
+
+
+class Server_password(extensions.ExtensionDescriptor):
+ """Server password support."""
+
+ name = "ServerPassword"
+ alias = "os-server-password"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "server-password/api/v2")
+ updated = "2012-11-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension(
+ 'os-server-password',
+ controller=ServerPasswordController(),
+ collection_actions={'delete': 'DELETE'},
+ parent=dict(member_name='server', collection_name='servers'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/compute/contrib/server_start_stop.py b/nova/api/openstack/compute/contrib/server_start_stop.py
index 049fa393b..733972083 100644
--- a/nova/api/openstack/compute/contrib/server_start_stop.py
+++ b/nova/api/openstack/compute/contrib/server_start_stop.py
@@ -40,7 +40,7 @@ class ServerStartStopActionController(wsgi.Controller):
@wsgi.action('os-start')
def _start_server(self, req, id, body):
- """Start an instance. """
+ """Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('start instance'), instance=instance)
@@ -58,7 +58,7 @@ class ServerStartStopActionController(wsgi.Controller):
class Server_start_stop(extensions.ExtensionDescriptor):
- """Start/Stop instance compute API support"""
+ """Start/Stop instance compute API support."""
name = "ServerStartStop"
alias = "os-server-start-stop"
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
new file mode 100644
index 000000000..2786ad814
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -0,0 +1,144 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import availability_zones
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova import utils
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'services')
+CONF = cfg.CONF
+CONF.import_opt('service_down_time', 'nova.service')
+
+
+class ServicesIndexTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('services')
+ elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
+ elem.set('binary')
+ elem.set('host')
+ elem.set('zone')
+ elem.set('status')
+ elem.set('state')
+ elem.set('updated_at')
+
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('host')
+ root.set('host')
+ root.set('service')
+ root.set('disabled')
+
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServiceController(object):
+ @wsgi.serializers(xml=ServicesIndexTemplate)
+ def index(self, req):
+ """
+ Return a list of all running services. Filter by host & service name.
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ now = timeutils.utcnow()
+ services = db.service_get_all(context)
+ services = availability_zones.set_availability_zones(context, services)
+
+ host = ''
+ if 'host' in req.GET:
+ host = req.GET['host']
+ service = ''
+ if 'service' in req.GET:
+ service = req.GET['service']
+ if host:
+ services = [s for s in services if s['host'] == host]
+ if service:
+ services = [s for s in services if s['binary'] == service]
+
+ svcs = []
+ for svc in services:
+ delta = now - (svc['updated_at'] or svc['created_at'])
+ alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
+ art = (alive and "up") or "down"
+ active = 'enabled'
+ if svc['disabled']:
+ active = 'disabled'
+ svcs.append({"binary": svc['binary'], 'host': svc['host'],
+ 'zone': svc['availability_zone'],
+ 'status': active, 'state': art,
+ 'updated_at': svc['updated_at']})
+ return {'services': svcs}
+
+ @wsgi.serializers(xml=ServicesUpdateTemplate)
+ def update(self, req, id, body):
+ """Enable/Disable scheduling for a service."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if id == "enable":
+ disabled = False
+ elif id == "disable":
+ disabled = True
+ else:
+ raise webob.exc.HTTPNotFound("Unknown action")
+
+ try:
+ host = body['host']
+ service = body['service']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ try:
+ svc = db.service_get_by_args(context, host, service)
+ if not svc:
+ raise webob.exc.HTTPNotFound('Unknown service')
+
+ db.service_update(context, svc['id'], {'disabled': disabled})
+ except exception.ServiceNotFound:
+ raise webob.exc.HTTPNotFound("service not found")
+
+ return {'host': host, 'service': service, 'disabled': disabled}
+
+
+class Services(extensions.ExtensionDescriptor):
+ """Services support."""
+
+ name = "Services"
+ alias = "os-services"
+ namespace = "http://docs.openstack.org/compute/ext/services/api/v2"
+ updated = "2012-10-28T00:00:00-00:00"
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('os-services',
+ ServiceController())
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index f6e9a63f6..8502e93c4 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -23,7 +23,6 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
authorize_show = extensions.extension_authorizer('compute',
@@ -210,7 +209,7 @@ class SimpleTenantUsageController(object):
@wsgi.serializers(xml=SimpleTenantUsagesTemplate)
def index(self, req):
- """Retrieve tenant_usage for all tenants"""
+ """Retrieve tenant_usage for all tenants."""
context = req.environ['nova.context']
authorize_list(context)
@@ -227,7 +226,7 @@ class SimpleTenantUsageController(object):
@wsgi.serializers(xml=SimpleTenantUsageTemplate)
def show(self, req, id):
- """Retrieve tenant_usage for a specified tenant"""
+ """Retrieve tenant_usage for a specified tenant."""
tenant_id = id
context = req.environ['nova.context']
@@ -250,7 +249,7 @@ class SimpleTenantUsageController(object):
class Simple_tenant_usage(extensions.ExtensionDescriptor):
- """Simple tenant usage extension"""
+ """Simple tenant usage extension."""
name = "SimpleTenantUsage"
alias = "os-simple-tenant-usage"
diff --git a/nova/api/openstack/compute/contrib/used_limits.py b/nova/api/openstack/compute/contrib/used_limits.py
index 020f6d8e4..a5e0b378b 100644
--- a/nova/api/openstack/compute/contrib/used_limits.py
+++ b/nova/api/openstack/compute/contrib/used_limits.py
@@ -39,6 +39,13 @@ class UsedLimitsTemplate(xmlutil.TemplateBuilder):
class UsedLimitsController(wsgi.Controller):
+ @staticmethod
+ def _reserved(req):
+ try:
+ return int(req.GET['reserved'])
+ except (ValueError, KeyError):
+ return False
+
@wsgi.extends
def index(self, req, resp_obj):
resp_obj.attach(xml=UsedLimitsTemplate())
@@ -49,15 +56,15 @@ class UsedLimitsController(wsgi.Controller):
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
- 'totalVolumesUsed': 'volumes',
- 'totalVolumeGigabytesUsed': 'gigabytes',
- 'totalSecurityGroupsUsed': 'floating_ips',
- 'totalKeyPairsUsed': 'key_pairs',
+ 'totalFloatingIpsUsed': 'floating_ips',
+ 'totalSecurityGroupsUsed': 'security_groups',
}
used_limits = {}
for display_name, quota in quota_map.iteritems():
if quota in quotas:
- used_limits[display_name] = quotas[quota]['in_use']
+ reserved = (quotas[quota]['reserved']
+ if self._reserved(req) else 0)
+ used_limits[display_name] = quotas[quota]['in_use'] + reserved
resp_obj.obj['limits']['absolute'].update(used_limits)
diff --git a/nova/api/openstack/compute/contrib/user_data.py b/nova/api/openstack/compute/contrib/user_data.py
index debd1176e..1f58c102d 100644
--- a/nova/api/openstack/compute/contrib/user_data.py
+++ b/nova/api/openstack/compute/contrib/user_data.py
@@ -18,7 +18,7 @@ from nova.api.openstack import extensions
class User_data(extensions.ExtensionDescriptor):
- """Add user_data to the Create Server v1.1 API"""
+ """Add user_data to the Create Server v1.1 API."""
name = "UserData"
alias = "os-user-data"
diff --git a/nova/api/openstack/compute/contrib/virtual_interfaces.py b/nova/api/openstack/compute/contrib/virtual_interfaces.py
index 1e5d0342d..50c26d754 100644
--- a/nova/api/openstack/compute/contrib/virtual_interfaces.py
+++ b/nova/api/openstack/compute/contrib/virtual_interfaces.py
@@ -77,7 +77,7 @@ class ServerVirtualInterfaceController(object):
class Virtual_interfaces(extensions.ExtensionDescriptor):
- """Virtual interface support"""
+ """Virtual interface support."""
name = "VirtualInterfaces"
alias = "os-virtual-interfaces"
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 1de6134ad..47c717495 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -25,8 +25,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
from nova import utils
from nova import volume
@@ -127,20 +127,20 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
- volume = {}
+ vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
- volume[attr] = volume_node.getAttribute(attr)
+ vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
- volume['metadata'] = self.extract_metadata(metadata_node)
+ vol['metadata'] = self.extract_metadata(metadata_node)
- return volume
+ return vol
class CreateDeserializer(CommonDeserializer):
@@ -153,8 +153,8 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = minidom.parseString(string)
- volume = self._extract_volume(dom)
- return {'body': {'volume': volume}}
+ vol = self._extract_volume(dom)
+ return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
@@ -185,8 +185,8 @@ class VolumeController(wsgi.Controller):
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
- volume = self.volume_api.get(context, id)
- self.volume_api.delete(context, volume)
+ vol = self.volume_api.get(context, id)
+ self.volume_api.delete(context, vol)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -366,6 +366,12 @@ class VolumeAttachmentController(wsgi.Controller):
instance['uuid'],
assigned_mountpoint)}
+ def _validate_volume_id(self, volume_id):
+ if not uuidutils.is_uuid_like(volume_id):
+ msg = _("Bad volumeId format: volumeId is "
+ "not in proper format (%s)") % volume_id
+ raise exc.HTTPBadRequest(explanation=msg)
+
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
@@ -378,6 +384,8 @@ class VolumeAttachmentController(wsgi.Controller):
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
+ self._validate_volume_id(volume_id)
+
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
@@ -581,7 +589,7 @@ class SnapshotController(wsgi.Controller):
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
- volume = self.volume_api.get(context, volume_id)
+ vol = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
@@ -593,12 +601,12 @@ class SnapshotController(wsgi.Controller):
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
- volume,
+ vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
- volume,
+ vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
@@ -608,7 +616,7 @@ class SnapshotController(wsgi.Controller):
class Volumes(extensions.ExtensionDescriptor):
- """Volumes support"""
+ """Volumes support."""
name = "Volumes"
alias = "os-volumes"
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index c46a6b034..1d0738417 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -16,14 +16,21 @@
# under the License.
from nova.api.openstack import extensions as base_extensions
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
+ext_opts = [
+ cfg.MultiStrOpt('osapi_compute_extension',
+ default=[
+ 'nova.api.openstack.compute.contrib.standard_extensions'
+ ],
+ help='osapi compute extension to load'),
+]
+CONF = cfg.CONF
+CONF.register_opts(ext_opts)
LOG = logging.getLogger(__name__)
-CONF = config.CONF
class ExtensionManager(base_extensions.ExtensionManager):
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 3bc817076..1a467f3a7 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -20,12 +20,11 @@ from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
-from nova import flags
from nova.image import glance
class Controller(object):
- """The image metadata API controller for the OpenStack API"""
+ """The image metadata API controller for the OpenStack API."""
def __init__(self):
self.image_service = glance.get_default_image_service()
@@ -39,7 +38,7 @@ class Controller(object):
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, image_id):
- """Returns the list of metadata for a given instance"""
+ """Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 0c280618e..7dda64f87 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -20,7 +20,6 @@ from nova.api.openstack.compute.views import images as views_images
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
-from nova import flags
import nova.image.glance
from nova.openstack.common import log as logging
import nova.utils
diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py
index ec9759759..7b196d956 100644
--- a/nova/api/openstack/compute/ips.py
+++ b/nova/api/openstack/compute/ips.py
@@ -22,7 +22,6 @@ from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index c0ef65670..767280a45 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -212,6 +212,7 @@ DEFAULT_LIMITS = [
Limit("PUT", "*", ".*", 10, PER_MINUTE),
Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
Limit("DELETE", "*", ".*", 100, PER_MINUTE),
+ Limit("GET", "*/os-fping", "^/os-fping", 12, PER_HOUR),
]
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index 4e5a3ee02..023a054d0 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -24,7 +24,7 @@ from nova import exception
class Controller(object):
- """ The server metadata API controller for the OpenStack API """
+ """The server metadata API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@@ -45,7 +45,7 @@ class Controller(object):
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, server_id):
- """ Returns the list of metadata for a given instance """
+ """Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
return {'metadata': self._get_metadata(context, server_id)}
@@ -138,7 +138,7 @@ class Controller(object):
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, server_id, id):
- """ Return a single metadata item """
+ """Return a single metadata item."""
context = req.environ['nova.context']
data = self._get_metadata(context, server_id)
@@ -150,7 +150,7 @@ class Controller(object):
@wsgi.response(204)
def delete(self, req, server_id, id):
- """ Deletes an existing metadata """
+ """Deletes an existing metadata."""
context = req.environ['nova.context']
metadata = self._get_metadata(context, server_id)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index d8d2f1c28..f0fdb5a15 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -30,9 +30,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
@@ -41,8 +40,18 @@ from nova.openstack.common import uuidutils
from nova import utils
+server_opts = [
+ cfg.BoolOpt('enable_instance_password',
+ default=True,
+ help='Allows use of instance password during '
+ 'server creation'),
+]
+CONF = cfg.CONF
+CONF.register_opts(server_opts)
+CONF.import_opt('network_api_class', 'nova.network')
+CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
+
LOG = logging.getLogger(__name__)
-CONF = config.CONF
def make_fault(elem):
@@ -214,10 +223,14 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
if auto_disk_config:
server['OS-DCF:diskConfig'] = utils.bool_from_str(auto_disk_config)
+ config_drive = server_node.getAttribute('config_drive')
+ if config_drive:
+ server['config_drive'] = config_drive
+
return server
def _extract_block_device_mapping(self, server_node):
- """Marshal the block_device_mapping node of a parsed request"""
+ """Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
@@ -242,7 +255,7 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
return None
def _extract_scheduler_hints(self, server_node):
- """Marshal the scheduler hints attribute of a parsed request"""
+ """Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
@@ -443,9 +456,6 @@ class Controller(wsgi.Controller):
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
- except exception.NotFound:
- msg = _("Instance could not be found")
- raise exc.HTTPNotFound(explanation=msg)
return servers
@wsgi.serializers(xml=ServersTemplate)
@@ -455,9 +465,6 @@ class Controller(wsgi.Controller):
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
- except exception.NotFound as err:
- msg = _("Instance could not be found")
- raise exc.HTTPNotFound(explanation=msg)
return servers
def _add_instance_faults(self, ctxt, instances):
@@ -517,12 +524,7 @@ class Controller(wsgi.Controller):
msg = _("Only administrators may list deleted instances")
raise exc.HTTPBadRequest(explanation=msg)
- # NOTE(dprince) This prevents computes' get_all() from returning
- # instances from multiple tenants when an admin accounts is used.
- # By default non-admin accounts are always limited to project/user
- # both here and in the compute API.
- if not context.is_admin or (context.is_admin and 'all_tenants'
- not in search_opts):
+ if 'all_tenants' not in search_opts:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
@@ -536,7 +538,10 @@ class Controller(wsgi.Controller):
marker=marker)
except exception.MarkerNotFound as e:
msg = _('marker [%s] not found') % marker
- raise webob.exc.HTTPBadRequest(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.FlavorNotFound as e:
+ msg = _("Flavor could not be found")
+ raise exc.HTTPUnprocessableEntity(explanation=msg)
if is_detail:
self._add_instance_faults(context, instance_list)
@@ -742,8 +747,7 @@ class Controller(wsgi.Controller):
self._validate_server_name(name)
name = name.strip()
- image_href = self._image_ref_from_req_data(body)
- image_href = self._image_uuid_from_href(image_href)
+ image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
@@ -824,21 +828,24 @@ class Controller(wsgi.Controller):
try:
min_count = int(min_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('min_count must be an '
- 'integer value'))
+ msg = _('min_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count < 1:
- raise webob.exc.HTTPBadRequest(_('min_count must be > 0'))
+ msg = _('min_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
max_count = int(max_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('max_count must be an '
- 'integer value'))
+ msg = _('max_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if max_count < 1:
- raise webob.exc.HTTPBadRequest(_('max_count must be > 0'))
+ msg = _('max_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count > max_count:
- raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count'))
+ msg = _('min_count must be <= max_count')
+ raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
@@ -854,7 +861,7 @@ class Controller(wsgi.Controller):
(instances, resv_id) = self.compute_api.create(context,
inst_type,
- image_href,
+ image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
@@ -975,6 +982,10 @@ class Controller(wsgi.Controller):
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
+ if 'personality' in body['server']:
+ msg = _("Personality cannot be updated.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
try:
instance = self.compute_api.get(ctxt, id)
req.cache_db_instance(instance)
@@ -1020,6 +1031,9 @@ class Controller(wsgi.Controller):
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.InstanceTypeNotFound:
+ msg = _("Flavor used by the instance could not be found.")
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
@@ -1107,6 +1121,24 @@ class Controller(wsgi.Controller):
return image_uuid
+ def _image_from_req_data(self, data):
+ """
+ Get image data from the request or raise appropriate
+ exceptions
+
+ If no image is supplied - checks to see if there is
+ block devices set and proper extensions loaded.
+ """
+ image_ref = data['server'].get('imageRef')
+ bdm = data['server'].get('block_device_mapping')
+
+ if not image_ref and bdm and self.ext_mgr.is_loaded('os-volumes'):
+ return ''
+ else:
+ image_href = self._image_ref_from_req_data(data)
+ image_uuid = self._image_uuid_from_href(image_href)
+ return image_uuid
+
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
@@ -1173,7 +1205,8 @@ class Controller(wsgi.Controller):
try:
body = body['rebuild']
except (KeyError, TypeError):
- raise exc.HTTPBadRequest(_("Invalid request body"))
+ msg = _('Invalid request body')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = body["imageRef"]
@@ -1186,7 +1219,7 @@ class Controller(wsgi.Controller):
try:
password = body['adminPass']
except (KeyError, TypeError):
- password = utils.generate_password(CONF.password_length)
+ password = utils.generate_password()
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
@@ -1328,7 +1361,7 @@ class Controller(wsgi.Controller):
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
- password = utils.generate_password(CONF.password_length)
+ password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
@@ -1341,7 +1374,7 @@ class Controller(wsgi.Controller):
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
- 'changes-since')
+ 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
diff --git a/nova/api/openstack/compute/versions.py b/nova/api/openstack/compute/versions.py
index 76e37cf41..5c416908e 100644
--- a/nova/api/openstack/compute/versions.py
+++ b/nova/api/openstack/compute/versions.py
@@ -26,9 +26,9 @@ from nova.openstack.common import timeutils
LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
+ 'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
+ 'api/openstack-compute/2/wadl/os-compute-2.wadl'
},
}
diff --git a/nova/api/openstack/compute/views/addresses.py b/nova/api/openstack/compute/views/addresses.py
index ec5fda64a..e1d75a9aa 100644
--- a/nova/api/openstack/compute/views/addresses.py
+++ b/nova/api/openstack/compute/views/addresses.py
@@ -18,7 +18,6 @@
import itertools
from nova.api.openstack import common
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py
index d1d7d008f..6438f0aa9 100644
--- a/nova/api/openstack/compute/views/images.py
+++ b/nova/api/openstack/compute/views/images.py
@@ -18,11 +18,7 @@
import os.path
from nova.api.openstack import common
-from nova import config
-from nova import flags
-from nova import utils
-
-CONF = config.CONF
+from nova.image import glance
class ViewBuilder(common.ViewBuilder):
@@ -121,9 +117,8 @@ class ViewBuilder(common.ViewBuilder):
def _get_alternate_link(self, request, identifier):
"""Create an alternate link for a specific image id."""
- glance_url = utils.generate_glance_url()
- glance_url = self._update_link_prefix(glance_url,
- CONF.osapi_glance_link_prefix)
+ glance_url = glance.generate_glance_url()
+ glance_url = self._update_glance_link_prefix(glance_url)
return os.path.join(glance_url,
request.environ["nova.context"].project_id,
self._collection_name,
diff --git a/nova/api/openstack/compute/views/limits.py b/nova/api/openstack/compute/views/limits.py
index 4b72f26e7..4ccf40de7 100644
--- a/nova/api/openstack/compute/views/limits.py
+++ b/nova/api/openstack/compute/views/limits.py
@@ -47,8 +47,6 @@ class ViewBuilder(object):
"ram": ["maxTotalRAMSize"],
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
- "gigabytes": ["maxTotalVolumeGigabytes"],
- "volumes": ["maxTotalVolumes"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index b423b37d4..d281f6a61 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -164,17 +164,20 @@ class ViewBuilder(common.ViewBuilder):
def _get_image(self, request, instance):
image_ref = instance["image_ref"]
- image_id = str(common.get_id_from_href(image_ref))
- bookmark = self._image_builder._get_bookmark_link(request,
- image_id,
- "images")
- return {
- "id": image_id,
- "links": [{
- "rel": "bookmark",
- "href": bookmark,
- }],
- }
+ if image_ref:
+ image_id = str(common.get_id_from_href(image_ref))
+ bookmark = self._image_builder._get_bookmark_link(request,
+ image_id,
+ "images")
+ return {
+ "id": image_id,
+ "links": [{
+ "rel": "bookmark",
+ "href": bookmark,
+ }],
+ }
+ else:
+ return ""
def _get_flavor(self, request, instance):
instance_type = instance["instance_type"]
diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py
index 826c8b4a5..105921ff0 100644
--- a/nova/api/openstack/compute/views/versions.py
+++ b/nova/api/openstack/compute/views/versions.py
@@ -19,11 +19,6 @@ import copy
import os
from nova.api.openstack import common
-from nova import config
-from nova import flags
-
-
-CONF = config.CONF
def get_view_builder(req):
@@ -93,8 +88,7 @@ class ViewBuilder(common.ViewBuilder):
def generate_href(self, path=None):
"""Create an url that refers to a specific version_number."""
- prefix = self._update_link_prefix(self.base_url,
- CONF.osapi_compute_link_prefix)
+ prefix = self._update_compute_link_prefix(self.base_url)
version_number = 'v2'
if path:
path = path.strip('/')
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 298e98603..a94065ab0 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -25,7 +25,6 @@ import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index bfe0ec599..519669134 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -181,7 +181,7 @@ class ActionDispatcher(object):
class TextDeserializer(ActionDispatcher):
- """Default request body deserialization"""
+ """Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
@@ -245,7 +245,7 @@ class XMLDeserializer(TextDeserializer):
return result
def find_first_child_named_in_namespace(self, parent, namespace, name):
- """Search a nodes children for the first child with a given name"""
+ """Search a node's children for the first child with a given name."""
for node in parent.childNodes:
if (node.localName == name and
node.namespaceURI and
@@ -254,20 +254,20 @@ class XMLDeserializer(TextDeserializer):
return None
def find_first_child_named(self, parent, name):
- """Search a nodes children for the first child with a given name"""
+ """Search a node's children for the first child with a given name."""
for node in parent.childNodes:
if node.localName == name:
return node
return None
def find_children_named(self, parent, name):
- """Return all of a nodes children who have the given name"""
+ """Return all of a node's children that have the given name."""
for node in parent.childNodes:
if node.localName == name:
yield node
def extract_text(self, node):
- """Get the text field contained by the given node"""
+ """Get the text field contained by the given node."""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
@@ -275,7 +275,7 @@ class XMLDeserializer(TextDeserializer):
return ""
def extract_elements(self, node):
- """Get only Element type childs from node"""
+ """Get only Element type children from node."""
elements = []
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
@@ -283,7 +283,7 @@ class XMLDeserializer(TextDeserializer):
return elements
def find_attribute_or_element(self, parent, name):
- """Get an attribute value; fallback to an element if not found"""
+ """Get an attribute value; fallback to an element if not found."""
if parent.hasAttribute(name):
return parent.getAttribute(name)
@@ -300,7 +300,7 @@ class XMLDeserializer(TextDeserializer):
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
- """Marshal the metadata attribute of a parsed request"""
+ """Marshal the metadata attribute of a parsed request."""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
@@ -310,7 +310,7 @@ class MetadataXMLDeserializer(XMLDeserializer):
class DictSerializer(ActionDispatcher):
- """Default request body serialization"""
+ """Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
@@ -320,7 +320,7 @@ class DictSerializer(ActionDispatcher):
class JSONDictSerializer(DictSerializer):
- """Default JSON request body serialization"""
+ """Default JSON request body serialization."""
def default(self, data):
return jsonutils.dumps(data)
@@ -1016,7 +1016,8 @@ class Resource(wsgi.Application):
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
- action not in ['action', 'create', 'delete']):
+ action not in ['action', 'create', 'delete', 'update',
+ 'show']):
# Propagate the error
raise
else:
@@ -1180,8 +1181,9 @@ class Fault(webob.exc.HTTPException):
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
- retry = self.wrapped_exc.headers['Retry-After']
- fault_data[fault_name]['retryAfter'] = retry
+ retry = self.wrapped_exc.headers.get('Retry-After', None)
+ if retry:
+ fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 1d22e74fc..77ab4415c 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -21,8 +21,6 @@ Request Body limiting middleware.
import webob.dec
import webob.exc
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi
@@ -34,13 +32,13 @@ max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
help='the maximum body size '
'per each osapi request(bytes)')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
- """Add a 'nova.context' to WSGI environ."""
+ """Limit the size of incoming requests."""
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
diff --git a/nova/api/validator.py b/nova/api/validator.py
index beee370b9..ddcc3ed2a 100644
--- a/nova/api/validator.py
+++ b/nova/api/validator.py
@@ -101,7 +101,7 @@ def validate_ipv4(addr):
def validate_user_data(user_data):
- """Check if the user_data is encoded properly"""
+ """Check if the user_data is encoded properly."""
try:
user_data = base64.b64decode(user_data)
except TypeError:
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
new file mode 100644
index 000000000..cb5cce591
--- /dev/null
+++ b/nova/availability_zones.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Availability zone helper functions (shared by multiple APIs)."""
+
+from nova import db
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+
+availability_zone_opts = [
+ cfg.StrOpt('internal_service_availability_zone',
+ default='internal',
+ help='availability_zone to show internal services under'),
+ cfg.StrOpt('default_availability_zone',
+ # deprecated in Grizzly release
+ deprecated_name='node_availability_zone',
+ default='nova',
+ help='default compute node availability_zone'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(availability_zone_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+def set_availability_zones(context, services):
+ # Makes sure services isn't a sqlalchemy object
+ services = [dict(service.iteritems()) for service in services]
+ metadata = db.aggregate_host_get_by_metadata_key(context,
+ key='availability_zone')
+ for service in services:
+ az = CONF.internal_service_availability_zone
+ if service['topic'] == "compute":
+ if metadata.get(service['host']):
+ az = str(metadata[service['host']])[5:-2]
+ else:
+ az = CONF.default_availability_zone
+ service['availability_zone'] = az
+ return services
+
+
+def get_host_availability_zone(context, host):
+ metadata = db.aggregate_metadata_get_by_host(
+ context.get_admin_context(), host, key='availability_zone')
+ if 'availability_zone' in metadata:
+ return list(metadata['availability_zone'])[0]
+ else:
+ return CONF.default_availability_zone
diff --git a/nova/block_device.py b/nova/block_device.py
index fa59fe325..c95961911 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -17,10 +17,7 @@
import re
-from nova import config
-from nova import flags
-
-CONF = config.CONF
+from nova.virt import driver
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
@@ -65,7 +62,7 @@ def is_swap_or_ephemeral(device_name):
def mappings_prepend_dev(mappings):
- """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type"""
+ """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
@@ -78,7 +75,7 @@ _dev = re.compile('^/dev/')
def strip_dev(device_name):
- """remove leading '/dev/'"""
+ """remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
@@ -86,7 +83,7 @@ _pref = re.compile('^((x?v|s)d)')
def strip_prefix(device_name):
- """ remove both leading /dev/ and xvd or sd or vd """
+ """remove both leading /dev/ and xvd or sd or vd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
@@ -95,7 +92,7 @@ def instance_block_mapping(instance, bdms):
root_device_name = instance['root_device_name']
# NOTE(clayg): remove this when xenapi is setting default_root_device
if root_device_name is None:
- if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
+ if driver.compute_driver_matches('xenapi.XenAPIDriver'):
root_device_name = '/dev/xvda'
else:
return _DEFAULT_MAPPINGS
@@ -142,7 +139,7 @@ def instance_block_mapping(instance, bdms):
def match_device(device):
- """Matches device name and returns prefix, suffix"""
+ """Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
diff --git a/nova/cells/__init__.py b/nova/cells/__init__.py
new file mode 100644
index 000000000..47d21a14b
--- /dev/null
+++ b/nova/cells/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells
+"""
diff --git a/nova/cells/driver.py b/nova/cells/driver.py
new file mode 100644
index 000000000..04e29dddf
--- /dev/null
+++ b/nova/cells/driver.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Base Cells Communication Driver
+"""
+
+
+class BaseCellsDriver(object):
+ """The base class for cells communication.
+
+ One instance of this class will be created for every neighbor cell
+ that we find in the DB and it will be associated with the cell in
+ its CellState.
+
+ One instance is also created by the cells manager for setting up
+ the consumers.
+ """
+ def start_consumers(self, msg_runner):
+ """Start any consumers the driver may need."""
+ raise NotImplementedError()
+
+ def stop_consumers(self):
+ """Stop consuming messages."""
+ raise NotImplementedError()
+
+ def send_message_to_cell(self, cell_state, message):
+ """Send a message to a cell."""
+ raise NotImplementedError()
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
new file mode 100644
index 000000000..133946794
--- /dev/null
+++ b/nova/cells/manager.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells Service Manager
+"""
+import datetime
+import time
+
+from nova.cells import messaging
+from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
+from nova import context
+from nova import exception
+from nova import manager
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+
+cell_manager_opts = [
+ cfg.StrOpt('driver',
+ default='nova.cells.rpc_driver.CellsRPCDriver',
+ help='Cells communication driver to use'),
+ cfg.IntOpt("instance_updated_at_threshold",
+ default=3600,
+ help="Number of seconds after an instance was updated "
+ "or deleted to continue to update cells"),
+ cfg.IntOpt("instance_update_num_instances",
+ default=1,
+ help="Number of instances to update per periodic task run")
+]
+
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.register_opts(cell_manager_opts, group='cells')
+
+
+class CellsManager(manager.Manager):
+ """The nova-cells manager class. This class defines RPC
+ methods that the local cell may call. This class is NOT used for
+ messages coming from other cells. That communication is
+ driver-specific.
+
+ Communication to other cells happens via the messaging module. The
+ MessageRunner from that module will handle routing the message to
+ the correct cell via the communications driver. Most methods below
+ create 'targeted' (where we want to route a message to a specific cell)
+ or 'broadcast' (where we want a message to go to multiple cells)
+ messages.
+
+ Scheduling requests get passed to the scheduler class.
+ """
+ RPC_API_VERSION = '1.1'
+
+ def __init__(self, *args, **kwargs):
+ # Mostly for tests.
+ cell_state_manager = kwargs.pop('cell_state_manager', None)
+ super(CellsManager, self).__init__(*args, **kwargs)
+ if cell_state_manager is None:
+ cell_state_manager = cells_state.CellStateManager
+ self.state_manager = cell_state_manager()
+ self.msg_runner = messaging.MessageRunner(self.state_manager)
+ cells_driver_cls = importutils.import_class(
+ CONF.cells.driver)
+ self.driver = cells_driver_cls()
+ self.instances_to_heal = iter([])
+
+ def post_start_hook(self):
+ """Have the driver start its consumers for inter-cell communication.
+ Also ask our child cells for their capacities and capabilities so
+ we get them more quickly than just waiting for the next periodic
+ update. Receiving the updates from the children will cause us to
+ update our parents. If we don't have any children, just update
+ our parents immediately.
+ """
+ # FIXME(comstud): There's currently no hooks when services are
+ # stopping, so we have no way to stop consumers cleanly.
+ self.driver.start_consumers(self.msg_runner)
+ ctxt = context.get_admin_context()
+ if self.state_manager.get_child_cells():
+ self.msg_runner.ask_children_for_capabilities(ctxt)
+ self.msg_runner.ask_children_for_capacities(ctxt)
+ else:
+ self._update_our_parents(ctxt)
+
+ @manager.periodic_task
+ def _update_our_parents(self, ctxt):
+ """Update our parent cells with our capabilities and capacity
+ if we're at the bottom of the tree.
+ """
+ self.msg_runner.tell_parents_our_capabilities(ctxt)
+ self.msg_runner.tell_parents_our_capacities(ctxt)
+
+ @manager.periodic_task
+ def _heal_instances(self, ctxt):
+ """Periodic task to send updates for a number of instances to
+ parent cells.
+
+ On every run of the periodic task, we will attempt to sync
+ 'CONF.cells.instance_update_num_instances' number of instances.
+ When we get the list of instances, we shuffle them so that multiple
+ nova-cells services aren't attempting to sync the same instances
+ in lockstep.
+
+ If CONF.cells.instance_updated_at_threshold is set, only attempt
+ to sync instances that have been updated recently. The CONF
+ setting defines the maximum number of seconds old the updated_at
+ can be. I.e., a threshold of 3600 means to only update instances
+ that have been modified in the last hour.
+ """
+
+ if not self.state_manager.get_parent_cells():
+ # No need to sync up if we have no parents.
+ return
+
+ info = {'updated_list': False}
+
+ def _next_instance():
+ try:
+ instance = self.instances_to_heal.next()
+ except StopIteration:
+ if info['updated_list']:
+ return
+ threshold = CONF.cells.instance_updated_at_threshold
+ updated_since = None
+ if threshold > 0:
+ updated_since = timeutils.utcnow() - datetime.timedelta(
+ seconds=threshold)
+ self.instances_to_heal = cells_utils.get_instances_to_sync(
+ ctxt, updated_since=updated_since, shuffle=True,
+ uuids_only=True)
+ info['updated_list'] = True
+ try:
+ instance = self.instances_to_heal.next()
+ except StopIteration:
+ return
+ return instance
+
+ rd_context = ctxt.elevated(read_deleted='yes')
+
+ for i in xrange(CONF.cells.instance_update_num_instances):
+ while True:
+ # Yield to other greenthreads
+ time.sleep(0)
+ instance_uuid = _next_instance()
+ if not instance_uuid:
+ return
+ try:
+ instance = self.db.instance_get_by_uuid(rd_context,
+ instance_uuid)
+ except exception.InstanceNotFound:
+ continue
+ self._sync_instance(ctxt, instance)
+ break
+
+ def _sync_instance(self, ctxt, instance):
+ """Broadcast an instance_update or instance_destroy message up to
+ parent cells.
+ """
+ if instance['deleted']:
+ self.instance_destroy_at_top(ctxt, instance)
+ else:
+ self.instance_update_at_top(ctxt, instance)
+
+ def schedule_run_instance(self, ctxt, host_sched_kwargs):
+ """Pick a cell (possibly ourselves) to build new instance(s)
+ and forward the request accordingly.
+ """
+ # Target is ourselves first.
+ our_cell = self.state_manager.get_my_state()
+ self.msg_runner.schedule_run_instance(ctxt, our_cell,
+ host_sched_kwargs)
+
+ def get_cell_info_for_neighbors(self, _ctxt):
+ """Return cell information for our neighbor cells."""
+ return self.state_manager.get_cell_info_for_neighbors()
+
+ def run_compute_api_method(self, ctxt, cell_name, method_info, call):
+ """Call a compute API method in a specific cell."""
+ response = self.msg_runner.run_compute_api_method(ctxt,
+ cell_name,
+ method_info,
+ call)
+ if call:
+ return response.value_or_raise()
+
+ def instance_update_at_top(self, ctxt, instance):
+ """Update an instance at the top level cell."""
+ self.msg_runner.instance_update_at_top(ctxt, instance)
+
+ def instance_destroy_at_top(self, ctxt, instance):
+ """Destroy an instance at the top level cell."""
+ self.msg_runner.instance_destroy_at_top(ctxt, instance)
+
+ def instance_delete_everywhere(self, ctxt, instance, delete_type):
+ """This is used by the API cell when it didn't know what cell
+ an instance was in, but the instance was requested to be
+ deleted or soft_deleted. So, we'll broadcast this everywhere.
+ """
+ self.msg_runner.instance_delete_everywhere(ctxt, instance,
+ delete_type)
+
+ def instance_fault_create_at_top(self, ctxt, instance_fault):
+ """Create an instance fault at the top level cell."""
+ self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
+
+ def bw_usage_update_at_top(self, ctxt, bw_update_info):
+ """Update bandwidth usage at top level cell."""
+ self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
+
+ def sync_instances(self, ctxt, project_id, updated_since, deleted):
+ """Force a sync of all instances, potentially by project_id,
+ and potentially since a certain date/time.
+ """
+ self.msg_runner.sync_instances(ctxt, project_id, updated_since,
+ deleted)
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
new file mode 100644
index 000000000..34ca74855
--- /dev/null
+++ b/nova/cells/messaging.py
@@ -0,0 +1,1080 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cell messaging module.
+
+This module defines the different message types that are passed between
+cells and the methods that they can call when the target cell has been
+reached.
+
+The interface into this module is the MessageRunner class.
+"""
+import sys
+
+from eventlet import queue
+
+from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
+from nova import compute
+from nova import context
+from nova.db import base
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
+from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
+from nova import utils
+
+
+cell_messaging_opts = [
+ cfg.IntOpt('max_hop_count',
+ default=10,
+ help='Maximum number of hops for cells routing.'),
+ cfg.StrOpt('scheduler',
+ default='nova.cells.scheduler.CellsScheduler',
+ help='Cells scheduler to use')]
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
+CONF.register_opts(cell_messaging_opts, group='cells')
+
+LOG = logging.getLogger(__name__)
+
+# Separator used between cell names for the 'full cell name' and routing
+# path.
+_PATH_CELL_SEP = '!'
+
+
+def _reverse_path(path):
+ """Reverse a path. Used for sending responses upstream."""
+ path_parts = path.split(_PATH_CELL_SEP)
+ path_parts.reverse()
+ return _PATH_CELL_SEP.join(path_parts)
+
+
+def _response_cell_name_from_path(routing_path, neighbor_only=False):
+ """Reverse the routing_path. If we only want to send to our parent,
+ set neighbor_only to True.
+ """
+ path = _reverse_path(routing_path)
+ if not neighbor_only or len(path) == 1:
+ return path
+ return _PATH_CELL_SEP.join(path.split(_PATH_CELL_SEP)[:2])
+
+
+#
+# Message classes.
+#
+
+
+class _BaseMessage(object):
+ """Base message class. It defines data that is passed with every
+ single message through every cell.
+
+ Messages are JSON-ified before sending and turned back into a
+ class instance when being received.
+
+ Every message has a unique ID. This is used to route responses
+ back to callers. In the future, this might be used to detect
+ receiving the same message more than once.
+
+ routing_path is updated on every hop through a cell. The current
+ cell name is appended to it (cells are separated by
+ _PATH_CELL_SEP ('!')). This is used to tell if we've reached the
+ target cell and also to determine the source of a message for
+ responses by reversing it.
+
+ hop_count is incremented and compared against max_hop_count. The
+ only current usefulness of this is to break out of a routing loop
+ if someone has a broken config.
+
+ fanout means to send to all nova-cells services running in a cell.
+ This is useful for capacity and capability broadcasting as well
+ as making sure responses get back to the nova-cells service that
+ is waiting.
+ """
+
+ # Override message_type in a subclass
+ message_type = None
+
+ base_attrs_to_json = ['message_type',
+ 'ctxt',
+ 'method_name',
+ 'method_kwargs',
+ 'direction',
+ 'need_response',
+ 'fanout',
+ 'uuid',
+ 'routing_path',
+ 'hop_count',
+ 'max_hop_count']
+
+ def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
+ direction, need_response=False, fanout=False, uuid=None,
+ routing_path=None, hop_count=0, max_hop_count=None,
+ **kwargs):
+ self.ctxt = ctxt
+ self.resp_queue = None
+ self.msg_runner = msg_runner
+ self.state_manager = msg_runner.state_manager
+ # Copy these.
+ self.base_attrs_to_json = self.base_attrs_to_json[:]
+ # Normally this would just be CONF.cells.name, but going through
+ # the msg_runner allows us to stub it more easily.
+ self.our_path_part = self.msg_runner.our_name
+ self.uuid = uuid
+ if self.uuid is None:
+ self.uuid = uuidutils.generate_uuid()
+ self.method_name = method_name
+ self.method_kwargs = method_kwargs
+ self.direction = direction
+ self.need_response = need_response
+ self.fanout = fanout
+ self.routing_path = routing_path
+ self.hop_count = hop_count
+ if max_hop_count is None:
+ max_hop_count = CONF.cells.max_hop_count
+ self.max_hop_count = max_hop_count
+ self.is_broadcast = False
+ self._append_hop()
+ # Each sub-class should set this when the message is inited
+ self.next_hops = []
+ self.resp_queue = None
+
+ def __repr__(self):
+ _dict = self._to_dict()
+ _dict.pop('method_kwargs')
+ return "<%s: %s>" % (self.__class__.__name__, _dict)
+
+ def _append_hop(self):
+ """Add our hop to the routing_path."""
+ routing_path = (self.routing_path and
+ self.routing_path + _PATH_CELL_SEP or '')
+ self.routing_path = routing_path + self.our_path_part
+ self.hop_count += 1
+
+ def _at_max_hop_count(self, do_raise=True):
+ """Check if we're at the max hop count. If we are and do_raise is
+ True, raise CellMaxHopCountReached. If we are at the max and
+ do_raise is False... return True, else False.
+ """
+ if self.hop_count >= self.max_hop_count:
+ if do_raise:
+ raise exception.CellMaxHopCountReached(
+ hop_count=self.hop_count)
+ return True
+ return False
+
+ def _process_locally(self):
+ """It's been determined that we should process this message in this
+ cell. Go through the MessageRunner to call the appropriate
+ method for this message. Catch the response and/or exception and
+ encode it within a Response instance. Return it so the caller
+ can potentially return it to another cell... or return it to
+ a caller waiting in this cell.
+ """
+ try:
+ resp_value = self.msg_runner._process_message_locally(self)
+ failure = False
+ except Exception as exc:
+ resp_value = sys.exc_info()
+ failure = True
+ LOG.exception(_("Error processing message locally: %(exc)s"),
+ locals())
+ return Response(self.routing_path, resp_value, failure)
+
+ def _setup_response_queue(self):
+ """Shortcut to creating a response queue in the MessageRunner."""
+ self.resp_queue = self.msg_runner._setup_response_queue(self)
+
+ def _cleanup_response_queue(self):
+ """Shortcut to deleting a response queue in the MessageRunner."""
+ if self.resp_queue:
+ self.msg_runner._cleanup_response_queue(self)
+ self.resp_queue = None
+
+    def _wait_for_json_responses(self, num_responses=1):
+        """Wait for response(s) to be put into the eventlet queue. Since
+        each queue entry actually contains a list of JSON-ified responses,
+        combine them all into a single list to return.
+
+        Destroy the eventlet queue when done.
+
+        :param num_responses: number of queue entries to wait for (one per
+                              neighbor cell we forwarded to)
+        :raises: exception.CellTimeout if a queue entry does not arrive
+                 within CONF.cells.call_timeout seconds
+        """
+        if not self.resp_queue:
+            # Source is not actually expecting a response
+            return
+        responses = []
+        wait_time = CONF.cells.call_timeout
+        try:
+            for x in xrange(num_responses):
+                json_responses = self.resp_queue.get(timeout=wait_time)
+                responses.extend(json_responses)
+        except queue.Empty:
+            raise exception.CellTimeout()
+        finally:
+            # Always stop tracking the queue, even on timeout, so a late
+            # response is simply dropped rather than delivered to a dead
+            # waiter.
+            self._cleanup_response_queue()
+        return responses
+
+    def _send_json_responses(self, json_responses, neighbor_only=False,
+                             fanout=False):
+        """Send list of responses to this message. Responses passed here
+        are JSON-ified. Targeted messages have a single response while
+        Broadcast messages may have multiple responses.
+
+        If this cell was the source of the message, these responses will
+        be returned from self.process().
+
+        Otherwise, we will route the response to the source of the
+        request. If 'neighbor_only' is True, the response will be sent
+        to the neighbor cell, not the original requester. Broadcast
+        messages get aggregated at each hop, so neighbor_only will be
+        True for those messages.
+        """
+        if not self.need_response:
+            return
+        if self.source_is_us():
+            # We originated the request; decode and hand the Response
+            # objects straight back to the local caller.
+            responses = []
+            for json_response in json_responses:
+                responses.append(Response.from_json(json_response))
+            return responses
+        # Responses travel in the opposite direction from the request.
+        direction = self.direction == 'up' and 'down' or 'up'
+        response_kwargs = {'orig_message': self.to_json(),
+                           'responses': json_responses}
+        target_cell = _response_cell_name_from_path(self.routing_path,
+                neighbor_only=neighbor_only)
+        response = self.msg_runner._create_response_message(self.ctxt,
+                direction, target_cell, self.uuid, response_kwargs,
+                fanout=fanout)
+        response.process()
+
+    def _send_response(self, response, neighbor_only=False):
+        """Send a response to this message. If the source of the
+        request was ourselves, just return the response. It'll be
+        passed back to the caller of self.process(). See DocString for
+        _send_json_responses() as it handles most of the real work for
+        this method.
+
+        'response' is an instance of Response class.
+        """
+        if not self.need_response:
+            return
+        if self.source_is_us():
+            return response
+        self._send_json_responses([response.to_json()],
+                                  neighbor_only=neighbor_only)
+
+    def _send_response_from_exception(self, exc_info):
+        """Take an exception as returned from sys.exc_info(), encode
+        it in a Response, and send it.
+
+        :param exc_info: (type, value, traceback) triple from sys.exc_info()
+        """
+        # failure=True tells Response.to_json() to serialize the exception.
+        response = Response(self.routing_path, exc_info, True)
+        return self._send_response(response)
+
+    def _to_dict(self):
+        """Convert a message to a dictionary. Only used internally.
+
+        Only the attributes listed in self.base_attrs_to_json are
+        included; subclasses extend that list in __init__.
+        """
+        _dict = {}
+        for key in self.base_attrs_to_json:
+            _dict[key] = getattr(self, key)
+        return _dict
+
+    def to_json(self):
+        """Convert a message into JSON for sending to a sibling cell."""
+        _dict = self._to_dict()
+        # Convert context to dict.
+        _dict['ctxt'] = _dict['ctxt'].to_dict()
+        return jsonutils.dumps(_dict)
+
+    def source_is_us(self):
+        """Did this cell create this message?
+
+        True when the routing path contains only our own cell, i.e. the
+        message has not hopped anywhere yet.
+        """
+        return self.routing_path == self.our_path_part
+
+    def process(self):
+        """Process a message. Deal with it locally and/or forward it to a
+        sibling cell.
+
+        Override in a subclass.
+        """
+        raise NotImplementedError()
+
+
+class _TargetedMessage(_BaseMessage):
+    """A targeted message is a message that is destined for a specific
+    single cell.
+
+    'target_cell' can be a full cell name like 'api!child-cell' or it can
+    be an instance of the CellState class if the target is a neighbor cell.
+    """
+    message_type = 'targeted'
+
+    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
+                 direction, target_cell, **kwargs):
+        super(_TargetedMessage, self).__init__(msg_runner, ctxt,
+                method_name, method_kwargs, direction, **kwargs)
+        if isinstance(target_cell, cells_state.CellState):
+            # Neighbor cell or ourselves. Convert it to a 'full path'.
+            if target_cell.is_me:
+                target_cell = self.our_path_part
+            else:
+                target_cell = '%s%s%s' % (self.our_path_part,
+                                          _PATH_CELL_SEP,
+                                          target_cell.name)
+        # Full cell-path string of the final destination.
+        self.target_cell = target_cell
+        # Make sure the target survives JSON round-trips between cells.
+        self.base_attrs_to_json.append('target_cell')
+
+    def _get_next_hop(self):
+        """Return the cell name for the next hop. If the next hop is
+        the current cell, return None.
+
+        :raises: exception.CellRoutingInconsistency when the routing path
+                 is not a prefix of the target path, or when the next-hop
+                 cell is not a known neighbor.
+        """
+        if self.target_cell == self.routing_path:
+            return self.state_manager.my_cell_state
+        target_cell = self.target_cell
+        routing_path = self.routing_path
+        # Number of separators == number of hops taken so far; the next
+        # hop's name is the path component at index current_hops + 1.
+        current_hops = routing_path.count(_PATH_CELL_SEP)
+        next_hop_num = current_hops + 1
+        dest_hops = target_cell.count(_PATH_CELL_SEP)
+        if dest_hops < current_hops:
+            # We've somehow traveled past the target already.
+            reason = _("destination is %(target_cell)s but routing_path "
+                       "is %(routing_path)s") % locals()
+            raise exception.CellRoutingInconsistency(reason=reason)
+        dest_name_parts = target_cell.split(_PATH_CELL_SEP)
+        if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
+                routing_path):
+            # Our path so far must be a prefix of the destination path.
+            reason = _("destination is %(target_cell)s but routing_path "
+                       "is %(routing_path)s") % locals()
+            raise exception.CellRoutingInconsistency(reason=reason)
+        next_hop_name = dest_name_parts[next_hop_num]
+        if self.direction == 'up':
+            next_hop = self.state_manager.get_parent_cell(next_hop_name)
+        else:
+            next_hop = self.state_manager.get_child_cell(next_hop_name)
+        if not next_hop:
+            cell_type = 'parent' if self.direction == 'up' else 'child'
+            reason = _("Unknown %(cell_type)s when routing to "
+                       "%(target_cell)s") % locals()
+            raise exception.CellRoutingInconsistency(reason=reason)
+        return next_hop
+
+    def process(self):
+        """Process a targeted message. This is called for all cells
+        that touch this message. If the local cell is the one that
+        created this message, we reply directly with a Response instance.
+        If the local cell is not the target, an eventlet queue is created
+        and we wait for the response to show up via another thread
+        receiving the Response back.
+
+        Responses to targeted messages are routed directly back to the
+        source. No eventlet queues are created in intermediate hops.
+
+        All exceptions for processing the message across the whole
+        routing path are caught and encoded within the Response and
+        returned to the caller.
+        """
+        try:
+            next_hop = self._get_next_hop()
+        except Exception as exc:
+            exc_info = sys.exc_info()
+            LOG.exception(_("Error locating next hop for message: %(exc)s"),
+                          locals())
+            return self._send_response_from_exception(exc_info)
+
+        if next_hop.is_me:
+            # Final destination.
+            response = self._process_locally()
+            return self._send_response(response)
+
+        # Need to forward via neighbor cell.
+        if self.need_response and self.source_is_us():
+            # A response is needed and the source of the message is
+            # this cell. Create the eventlet queue.
+            self._setup_response_queue()
+            wait_for_response = True
+        else:
+            wait_for_response = False
+
+        try:
+            # This is inside the try block, so we can encode the
+            # exception and return it to the caller.
+            # NOTE(review): this duplicates _at_max_hop_count(); could
+            # call that helper instead.
+            if self.hop_count >= self.max_hop_count:
+                raise exception.CellMaxHopCountReached(
+                        hop_count=self.hop_count)
+            next_hop.send_message(self)
+        except Exception as exc:
+            exc_info = sys.exc_info()
+            err_str = _("Failed to send message to cell: %(next_hop)s: "
+                        "%(exc)s")
+            LOG.exception(err_str, locals())
+            self._cleanup_response_queue()
+            return self._send_response_from_exception(exc_info)
+
+        if wait_for_response:
+            # Targeted messages only have 1 response.
+            remote_response = self._wait_for_json_responses()[0]
+            return Response.from_json(remote_response)
+
+
+class _BroadcastMessage(_BaseMessage):
+    """A broadcast message. This means to call a method in every single
+    cell going in a certain direction.
+    """
+    message_type = 'broadcast'
+
+    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
+                 direction, run_locally=True, **kwargs):
+        super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
+                method_name, method_kwargs, direction, **kwargs)
+        # The local cell creating this message has the option
+        # to be able to process the message locally or not.
+        self.run_locally = run_locally
+        self.is_broadcast = True
+
+    def _get_next_hops(self):
+        """Return the list of next hop cells in our direction of travel.
+        Returns an empty list when the max hop count has been reached so
+        the broadcast stops propagating.
+        """
+        if self.hop_count >= self.max_hop_count:
+            return []
+        if self.direction == 'down':
+            return self.state_manager.get_child_cells()
+        else:
+            return self.state_manager.get_parent_cells()
+
+    def _send_to_cells(self, target_cells):
+        """Send a message to multiple cells."""
+        for cell in target_cells:
+            cell.send_message(self)
+
+    def _send_json_responses(self, json_responses):
+        """Responses to broadcast messages always need to go to the
+        neighbor cell from which we received this message. That
+        cell aggregates the responses and makes sure to forward them
+        to the correct source.
+        """
+        return super(_BroadcastMessage, self)._send_json_responses(
+                json_responses, neighbor_only=True, fanout=True)
+
+    def process(self):
+        """Process a broadcast message. This is called for all cells
+        that touch this message.
+
+        The message is sent to all cells in the certain direction and
+        the creator of this message has the option of whether or not
+        to process it locally as well.
+
+        If responses from all cells are required, each hop creates an
+        eventlet queue and waits for responses from its immediate
+        neighbor cells. All responses are then aggregated into a
+        single list and are returned to the neighbor cell until the
+        source is reached.
+
+        When the source is reached, a list of Response instances are
+        returned to the caller.
+
+        All exceptions for processing the message across the whole
+        routing path are caught and encoded within the Response and
+        returned to the caller. It is possible to get a mix of
+        successful responses and failure responses. The caller is
+        responsible for dealing with this.
+        """
+        try:
+            next_hops = self._get_next_hops()
+        except Exception as exc:
+            exc_info = sys.exc_info()
+            LOG.exception(_("Error locating next hops for message: %(exc)s"),
+                          locals())
+            return self._send_response_from_exception(exc_info)
+
+        # Short circuit if we don't need to respond
+        if not self.need_response:
+            if self.run_locally:
+                self._process_locally()
+            self._send_to_cells(next_hops)
+            return
+
+        # We'll need to aggregate all of the responses (from ourself
+        # and our sibling cells) into 1 response
+        try:
+            self._setup_response_queue()
+            self._send_to_cells(next_hops)
+        except Exception as exc:
+            # Error just trying to send to cells. Send a single response
+            # with the failure.
+            exc_info = sys.exc_info()
+            LOG.exception(_("Error sending message to next hops: %(exc)s"),
+                          locals())
+            self._cleanup_response_queue()
+            return self._send_response_from_exception(exc_info)
+
+        if self.run_locally:
+            # Run locally and store the Response.
+            local_response = self._process_locally()
+        else:
+            local_response = None
+
+        try:
+            # One queue entry is expected from each neighbor we sent to.
+            remote_responses = self._wait_for_json_responses(
+                    num_responses=len(next_hops))
+        except Exception as exc:
+            # Error waiting for responses, most likely a timeout.
+            # Send a single response back with the failure.
+            exc_info = sys.exc_info()
+            err_str = _("Error waiting for responses from neighbor cells: "
+                        "%(exc)s")
+            LOG.exception(err_str, locals())
+            return self._send_response_from_exception(exc_info)
+
+        if local_response:
+            remote_responses.append(local_response.to_json())
+        return self._send_json_responses(remote_responses)
+
+
+class _ResponseMessage(_TargetedMessage):
+    """A response message is really just a special targeted message,
+    saying to call 'parse_responses' when we reach the source of a 'call'.
+
+    The 'fanout' attribute on this message may be true if we're responding
+    to a broadcast or if we're about to respond to the source of an
+    original target message. Because multiple nova-cells services may
+    be running within a cell, we need to make sure the response gets
+    back to the correct one, so we have to fanout.
+    """
+    message_type = 'response'
+
+    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
+                 direction, target_cell, response_uuid, **kwargs):
+        super(_ResponseMessage, self).__init__(msg_runner, ctxt,
+                method_name, method_kwargs, direction, target_cell, **kwargs)
+        # UUID of the original message; used to locate the waiter's
+        # response queue at the source cell.
+        self.response_uuid = response_uuid
+        self.base_attrs_to_json.append('response_uuid')
+
+    def process(self):
+        """Process a response. If the target is the local cell, process
+        the response here. Otherwise, forward it to where it needs to
+        go.
+        """
+        next_hop = self._get_next_hop()
+        if next_hop.is_me:
+            self._process_locally()
+            return
+        if self.fanout is False:
+            # Really there's 1 more hop on each of these below, but
+            # it doesn't matter for this logic.
+            target_hops = self.target_cell.count(_PATH_CELL_SEP)
+            current_hops = self.routing_path.count(_PATH_CELL_SEP)
+            if current_hops + 1 == target_hops:
+                # Next hop is the target.. so we must fanout. See
+                # DocString above.
+                self.fanout = True
+        next_hop.send_message(self)
+
+
+#
+# Methods that may be called when processing messages after reaching
+# a target cell.
+#
+
+
+class _BaseMessageMethods(base.Base):
+    """Base class for defining methods by message types."""
+    def __init__(self, msg_runner):
+        super(_BaseMessageMethods, self).__init__()
+        # Handles for subclasses: the runner that routed the message to
+        # us, its cell-state manager, and a compute API instance.
+        self.msg_runner = msg_runner
+        self.state_manager = msg_runner.state_manager
+        self.compute_api = compute.API()
+
+
+class _ResponseMessageMethods(_BaseMessageMethods):
+    """Methods that are called from a ResponseMessage. There's only
+    1 method (parse_responses) and it is called when the message reaches
+    the source of a 'call'. All we do is stuff the response into the
+    eventlet queue to signal the caller that's waiting.
+    """
+    def parse_responses(self, message, orig_message, responses):
+        # Wake the waiter keyed by the original message's uuid.
+        self.msg_runner._put_response(message.response_uuid,
+                responses)
+
+
+class _TargetedMessageMethods(_BaseMessageMethods):
+    """These are the methods that can be called when routing a message
+    to a specific cell.
+    """
+    def __init__(self, *args, **kwargs):
+        super(_TargetedMessageMethods, self).__init__(*args, **kwargs)
+
+    def schedule_run_instance(self, message, host_sched_kwargs):
+        """Parent cell told us to schedule new instance creation."""
+        self.msg_runner.scheduler.run_instance(message, host_sched_kwargs)
+
+    def run_compute_api_method(self, message, method_info):
+        """Run a method in the compute api class.
+
+        :param method_info: dict with 'method' (name on compute.API),
+                            'method_args' (first arg is an instance uuid),
+                            and 'method_kwargs'
+        :raises: exception.CellServiceAPIMethodNotFound if the named
+                 method doesn't exist on the compute API
+        """
+        method = method_info['method']
+        fn = getattr(self.compute_api, method, None)
+        if not fn:
+            detail = _("Unknown method '%(method)s' in compute API")
+            raise exception.CellServiceAPIMethodNotFound(
+                    detail=detail % locals())
+        args = list(method_info['method_args'])
+        # 1st arg is instance_uuid that we need to turn into the
+        # instance object.
+        instance_uuid = args[0]
+        try:
+            instance = self.db.instance_get_by_uuid(message.ctxt,
+                                                    instance_uuid)
+        except exception.InstanceNotFound:
+            with excutils.save_and_reraise_exception():
+                # Must be a race condition. Let's try to resolve it by
+                # telling the top level cells that this instance doesn't
+                # exist.
+                instance = {'uuid': instance_uuid}
+                self.msg_runner.instance_destroy_at_top(message.ctxt,
+                                                        instance)
+        args[0] = instance
+        return fn(message.ctxt, *args, **method_info['method_kwargs'])
+
+    def update_capabilities(self, message, cell_name, capabilities):
+        """A child cell told us about their capabilities."""
+        LOG.debug(_("Received capabilities from child cell "
+                    "%(cell_name)s: %(capabilities)s"), locals())
+        self.state_manager.update_cell_capabilities(cell_name,
+                capabilities)
+        # Go ahead and update our parents now that a child updated us
+        self.msg_runner.tell_parents_our_capabilities(message.ctxt)
+
+    def update_capacities(self, message, cell_name, capacities):
+        """A child cell told us about their capacity."""
+        LOG.debug(_("Received capacities from child cell "
+                    "%(cell_name)s: %(capacities)s"), locals())
+        self.state_manager.update_cell_capacities(cell_name,
+                capacities)
+        # Go ahead and update our parents now that a child updated us
+        self.msg_runner.tell_parents_our_capacities(message.ctxt)
+
+    def announce_capabilities(self, message):
+        """A parent cell has told us to send our capabilities, so let's
+        do so.
+        """
+        self.msg_runner.tell_parents_our_capabilities(message.ctxt)
+
+    def announce_capacities(self, message):
+        """A parent cell has told us to send our capacity, so let's
+        do so.
+        """
+        self.msg_runner.tell_parents_our_capacities(message.ctxt)
+
+
+class _BroadcastMessageMethods(_BaseMessageMethods):
+    """These are the methods that can be called as a part of a broadcast
+    message.
+    """
+    def _at_the_top(self):
+        """Are we the API level?"""
+        # Top level == a cell with no parents.
+        return not self.state_manager.get_parent_cells()
+
+    def instance_update_at_top(self, message, instance, **kwargs):
+        """Update an instance in the DB if we're a top level cell."""
+        if not self._at_the_top():
+            return
+        instance_uuid = instance['uuid']
+
+        # Remove things that we can't update in the top level cells.
+        # 'metadata' is only updated in the API cell, so don't overwrite
+        # it based on what child cells say. Make sure to update
+        # 'cell_name' based on the routing path.
+        items_to_remove = ['id', 'security_groups', 'instance_type',
+                           'volumes', 'cell_name', 'name', 'metadata']
+        for key in items_to_remove:
+            instance.pop(key, None)
+        instance['cell_name'] = _reverse_path(message.routing_path)
+
+        # Fixup info_cache. We'll have to update this separately if
+        # it exists.
+        info_cache = instance.pop('info_cache', None)
+        if info_cache is not None:
+            info_cache.pop('id', None)
+            info_cache.pop('instance', None)
+
+        # Fixup system_metadata (should be a dict for update, not a list)
+        if ('system_metadata' in instance and
+                isinstance(instance['system_metadata'], list)):
+            sys_metadata = dict([(md['key'], md['value'])
+                    for md in instance['system_metadata']])
+            instance['system_metadata'] = sys_metadata
+
+        LOG.debug(_("Got update for instance %(instance_uuid)s: "
+                    "%(instance)s") % locals())
+
+        # It's possible due to some weird condition that the instance
+        # was already set as deleted... so we'll attempt to update
+        # it with permissions that allows us to read deleted.
+        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
+            try:
+                self.db.instance_update(message.ctxt, instance_uuid,
+                        instance, update_cells=False)
+            except exception.NotFound:
+                # FIXME(comstud): Strange. Need to handle quotas here,
+                # if we actually want this code to remain..
+                self.db.instance_create(message.ctxt, instance)
+        if info_cache:
+            self.db.instance_info_cache_update(message.ctxt, instance_uuid,
+                    info_cache, update_cells=False)
+
+    def instance_destroy_at_top(self, message, instance, **kwargs):
+        """Destroy an instance from the DB if we're a top level cell."""
+        if not self._at_the_top():
+            return
+        instance_uuid = instance['uuid']
+        LOG.debug(_("Got update to delete instance %(instance_uuid)s") %
+                locals())
+        try:
+            self.db.instance_destroy(message.ctxt, instance_uuid,
+                    update_cells=False)
+        except exception.InstanceNotFound:
+            # Already gone; nothing to do.
+            pass
+
+    def instance_delete_everywhere(self, message, instance, delete_type,
+                                   **kwargs):
+        """Call compute API delete() or soft_delete() in every cell.
+        This is used when the API cell doesn't know what cell an instance
+        belongs to but the instance was requested to be deleted or
+        soft-deleted. So, we'll run it everywhere.
+        """
+        LOG.debug(_("Got broadcast to %(delete_type)s delete instance"),
+                  locals(), instance=instance)
+        if delete_type == 'soft':
+            self.compute_api.soft_delete(message.ctxt, instance)
+        else:
+            self.compute_api.delete(message.ctxt, instance)
+
+    def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
+        """Create an instance fault in the DB if we're a top level cell."""
+        if not self._at_the_top():
+            return
+        # 'id' is cell-local; drop it before re-creating at the top.
+        items_to_remove = ['id']
+        for key in items_to_remove:
+            instance_fault.pop(key, None)
+        log_str = _("Got message to create instance fault: "
+                    "%(instance_fault)s")
+        LOG.debug(log_str, locals())
+        self.db.instance_fault_create(message.ctxt, instance_fault)
+
+    def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
+        """Update Bandwidth usage in the DB if we're a top level cell."""
+        if not self._at_the_top():
+            return
+        self.db.bw_usage_update(message.ctxt, **bw_update_info)
+
+    def _sync_instance(self, ctxt, instance):
+        # Propagate the instance's current state upward: deleted
+        # instances get destroyed at the top, others get updated.
+        if instance['deleted']:
+            self.msg_runner.instance_destroy_at_top(ctxt, instance)
+        else:
+            self.msg_runner.instance_update_at_top(ctxt, instance)
+
+    def sync_instances(self, message, project_id, updated_since, deleted,
+                       **kwargs):
+        """Force a sync of our instances up to the top level cell,
+        optionally filtered by project and/or an updated-since timestamp.
+
+        :param updated_since: ISO8601 timestamp string or None for all
+        :param deleted: whether to include deleted instances in the sync
+        """
+        projid_str = project_id is None and "<all>" or project_id
+        since_str = updated_since is None and "<all>" or updated_since
+        LOG.info(_("Forcing a sync of instances, project_id="
+                   "%(projid_str)s, updated_since=%(since_str)s"), locals())
+        if updated_since is not None:
+            updated_since = timeutils.parse_isotime(updated_since)
+        instances = cells_utils.get_instances_to_sync(message.ctxt,
+                updated_since=updated_since, project_id=project_id,
+                deleted=deleted)
+        for instance in instances:
+            self._sync_instance(message.ctxt, instance)
+
+
+# Dispatch tables keyed by a message's 'message_type' attribute: the
+# class used to (re-)construct the message, and the methods class whose
+# functions the message is allowed to invoke.
+_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
+                                     'broadcast': _BroadcastMessage,
+                                     'response': _ResponseMessage}
+_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
+                                     'broadcast': _BroadcastMessageMethods,
+                                     'response': _ResponseMessageMethods}
+
+
+#
+# Below are the public interfaces into this module.
+#
+
+
+class MessageRunner(object):
+    """This class is the main interface into creating messages and
+    processing them.
+
+    Public methods in this class are typically called by the CellsManager
+    to create a new message and process it with the exception of
+    'message_from_json' which should be used by CellsDrivers to convert
+    a JSONified message it has received back into the appropriate Message
+    class.
+
+    Private methods are used internally when we need to keep some
+    'global' state. For instance, eventlet queues used for responses are
+    held in this class. Also, when a Message is process()ed above and
+    it's determined we should take action locally,
+    _process_message_locally() will be called.
+
+    When needing to add a new method to call in a Cell2Cell message,
+    define the new method below and also add it to the appropriate
+    MessageMethods class where the real work will be done.
+    """
+
+    def __init__(self, state_manager):
+        self.state_manager = state_manager
+        cells_scheduler_cls = importutils.import_class(
+                CONF.cells.scheduler)
+        self.scheduler = cells_scheduler_cls(self)
+        # Maps message uuid -> eventlet queue for in-flight 'call's.
+        self.response_queues = {}
+        # One instance of each MessageMethods class, keyed by type.
+        self.methods_by_type = {}
+        self.our_name = CONF.cells.name
+        for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
+            self.methods_by_type[msg_type] = cls(self)
+
+    def _process_message_locally(self, message):
+        """Message processing will call this when its determined that
+        the message should be processed within this cell. Find the
+        method to call based on the message type, and call it. The
+        caller is responsible for catching exceptions and returning
+        results to cells, if needed.
+        """
+        methods = self.methods_by_type[message.message_type]
+        fn = getattr(methods, message.method_name)
+        return fn(message, **message.method_kwargs)
+
+    def _put_response(self, response_uuid, response):
+        """Put a response into a response queue. This is called when
+        a _ResponseMessage is processed in the cell that initiated a
+        'call' to another cell.
+        """
+        resp_queue = self.response_queues.get(response_uuid)
+        if not resp_queue:
+            # Response queue is gone. We must have restarted or we
+            # received a response after our timeout period.
+            return
+        resp_queue.put(response)
+
+    def _setup_response_queue(self, message):
+        """Set up an eventlet queue to use to wait for replies.
+
+        Replies come back from the target cell as a _ResponseMessage
+        being sent back to the source.
+        """
+        # Keyed by the originating message's uuid so _put_response()
+        # can locate it.
+        resp_queue = queue.Queue()
+        self.response_queues[message.uuid] = resp_queue
+        return resp_queue
+
+    def _cleanup_response_queue(self, message):
+        """Stop tracking the response queue either because we're
+        done receiving responses, or we've timed out.
+        """
+        try:
+            del self.response_queues[message.uuid]
+        except KeyError:
+            # Ignore if queue is gone already somehow.
+            pass
+
+    def _create_response_message(self, ctxt, direction, target_cell,
+                                 response_uuid, response_kwargs, **kwargs):
+        """Create a ResponseMessage. This is used internally within
+        the messaging module.
+        """
+        return _ResponseMessage(self, ctxt, 'parse_responses',
+                                response_kwargs, direction, target_cell,
+                                response_uuid, **kwargs)
+
+    def message_from_json(self, json_message):
+        """Turns a message in JSON format into an appropriate Message
+        instance. This is called when cells receive a message from
+        another cell.
+        """
+        message_dict = jsonutils.loads(json_message)
+        # 'message_type' selects the class; the rest become kwargs.
+        message_type = message_dict.pop('message_type')
+        # Need to convert context back.
+        ctxt = message_dict['ctxt']
+        message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
+        message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
+        return message_cls(self, **message_dict)
+
+    def ask_children_for_capabilities(self, ctxt):
+        """Tell child cells to send us capabilities. This is typically
+        called on startup of the nova-cells service.
+        """
+        child_cells = self.state_manager.get_child_cells()
+        for child_cell in child_cells:
+            message = _TargetedMessage(self, ctxt,
+                                       'announce_capabilities',
+                                       dict(), 'down', child_cell)
+            message.process()
+
+    def ask_children_for_capacities(self, ctxt):
+        """Tell child cells to send us capacities. This is typically
+        called on startup of the nova-cells service.
+        """
+        child_cells = self.state_manager.get_child_cells()
+        for child_cell in child_cells:
+            message = _TargetedMessage(self, ctxt, 'announce_capacities',
+                                       dict(), 'down', child_cell)
+            message.process()
+
+    def tell_parents_our_capabilities(self, ctxt):
+        """Send our capabilities to parent cells."""
+        parent_cells = self.state_manager.get_parent_cells()
+        if not parent_cells:
+            return
+        my_cell_info = self.state_manager.get_my_state()
+        capabs = self.state_manager.get_our_capabilities()
+        LOG.debug(_("Updating parents with our capabilities: %(capabs)s"),
+                  locals())
+        # We have to turn the sets into lists so they can potentially
+        # be json encoded when the raw message is sent.
+        for key, values in capabs.items():
+            capabs[key] = list(values)
+        method_kwargs = {'cell_name': my_cell_info.name,
+                         'capabilities': capabs}
+        for cell in parent_cells:
+            message = _TargetedMessage(self, ctxt, 'update_capabilities',
+                    method_kwargs, 'up', cell, fanout=True)
+            message.process()
+
+    def tell_parents_our_capacities(self, ctxt):
+        """Send our capacities to parent cells."""
+        parent_cells = self.state_manager.get_parent_cells()
+        if not parent_cells:
+            return
+        my_cell_info = self.state_manager.get_my_state()
+        capacities = self.state_manager.get_our_capacities()
+        LOG.debug(_("Updating parents with our capacities: %(capacities)s"),
+                  locals())
+        method_kwargs = {'cell_name': my_cell_info.name,
+                         'capacities': capacities}
+        for cell in parent_cells:
+            message = _TargetedMessage(self, ctxt, 'update_capacities',
+                    method_kwargs, 'up', cell, fanout=True)
+            message.process()
+
+    def schedule_run_instance(self, ctxt, target_cell, host_sched_kwargs):
+        """Called by the scheduler to tell a child cell to schedule
+        a new instance for build.
+        """
+        method_kwargs = dict(host_sched_kwargs=host_sched_kwargs)
+        message = _TargetedMessage(self, ctxt, 'schedule_run_instance',
+                                   method_kwargs, 'down',
+                                   target_cell)
+        message.process()
+
+    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
+        """Call a compute API method in a specific cell.
+
+        :param call: when True, wait for and return the Response
+        """
+        message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
+                                   dict(method_info=method_info), 'down',
+                                   cell_name, need_response=call)
+        return message.process()
+
+    def instance_update_at_top(self, ctxt, instance):
+        """Update an instance at the top level cell."""
+        message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
+                                    dict(instance=instance), 'up',
+                                    run_locally=False)
+        message.process()
+
+    def instance_destroy_at_top(self, ctxt, instance):
+        """Destroy an instance at the top level cell."""
+        message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
+                                    dict(instance=instance), 'up',
+                                    run_locally=False)
+        message.process()
+
+    def instance_delete_everywhere(self, ctxt, instance, delete_type):
+        """This is used by API cell when it didn't know what cell
+        an instance was in, but the instance was requested to be
+        deleted or soft_deleted. So, we'll broadcast this everywhere.
+        """
+        method_kwargs = dict(instance=instance, delete_type=delete_type)
+        message = _BroadcastMessage(self, ctxt,
+                                    'instance_delete_everywhere',
+                                    method_kwargs, 'down',
+                                    run_locally=False)
+        message.process()
+
+    def instance_fault_create_at_top(self, ctxt, instance_fault):
+        """Create an instance fault at the top level cell."""
+        message = _BroadcastMessage(self, ctxt,
+                                    'instance_fault_create_at_top',
+                                    dict(instance_fault=instance_fault),
+                                    'up', run_locally=False)
+        message.process()
+
+    def bw_usage_update_at_top(self, ctxt, bw_update_info):
+        """Update bandwidth usage at top level cell."""
+        message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
+                                    dict(bw_update_info=bw_update_info),
+                                    'up', run_locally=False)
+        message.process()
+
+    def sync_instances(self, ctxt, project_id, updated_since, deleted):
+        """Force a sync of all instances, potentially by project_id,
+        and potentially since a certain date/time.
+        """
+        method_kwargs = dict(project_id=project_id,
+                             updated_since=updated_since,
+                             deleted=deleted)
+        message = _BroadcastMessage(self, ctxt, 'sync_instances',
+                                    method_kwargs, 'down',
+                                    run_locally=False)
+        message.process()
+
+    @staticmethod
+    def get_message_types():
+        return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
+
+
+class Response(object):
+    """Holds a response from a cell. If there was a failure, 'failure'
+    will be True and 'response' will contain an encoded Exception.
+    """
+    def __init__(self, cell_name, value, failure):
+        # True when 'value' holds an exception (or exc_info triple).
+        self.failure = failure
+        # Routing path of the cell that produced this response.
+        self.cell_name = cell_name
+        self.value = value
+
+    def to_json(self):
+        """Serialize to JSON; failures get their exception encoded via
+        the RPC remote-exception serializer.
+        """
+        resp_value = self.value
+        if self.failure:
+            resp_value = rpc_common.serialize_remote_exception(resp_value,
+                                                               log_failure=False)
+        _dict = {'cell_name': self.cell_name,
+                 'value': resp_value,
+                 'failure': self.failure}
+        return jsonutils.dumps(_dict)
+
+    @classmethod
+    def from_json(cls, json_message):
+        """Inverse of to_json(); failures get their exception decoded."""
+        _dict = jsonutils.loads(json_message)
+        if _dict['failure']:
+            resp_value = rpc_common.deserialize_remote_exception(
+                    CONF, _dict['value'])
+            _dict['value'] = resp_value
+        return cls(**_dict)
+
+    def value_or_raise(self):
+        """Return the value, or re-raise the encoded exception on failure."""
+        if self.failure:
+            if isinstance(self.value, (tuple, list)):
+                # NOTE(review): Python 2-only three-expression raise;
+                # re-raises a stored sys.exc_info() with its traceback.
+                raise self.value[0], self.value[1], self.value[2]
+            else:
+                raise self.value
+        return self.value
diff --git a/nova/cells/opts.py b/nova/cells/opts.py
new file mode 100644
index 000000000..45b453ebc
--- /dev/null
+++ b/nova/cells/opts.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Global cells config options
+"""
+
+from nova.openstack.common import cfg
+
+# Config options registered under the [cells] group; shared by the
+# cells service and by modules that import_opt() individual entries.
+cells_opts = [
+        cfg.BoolOpt('enable',
+                    default=False,
+                    help='Enable cell functionality'),
+        cfg.StrOpt('topic',
+                   default='cells',
+                   help='the topic cells nodes listen on'),
+        cfg.StrOpt('manager',
+                   default='nova.cells.manager.CellsManager',
+                   help='Manager for cells'),
+        cfg.StrOpt('name',
+                   default='nova',
+                   help='name of this cell'),
+        cfg.ListOpt('capabilities',
+                    default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
+                    help='Key/Multi-value list with the capabilities of the cell'),
+        cfg.IntOpt('call_timeout',
+                   default=60,
+                   help='Seconds to wait for response from a call to a cell.'),
+]
+
+cfg.CONF.register_opts(cells_opts, group='cells')
diff --git a/nova/cells/rpc_driver.py b/nova/cells/rpc_driver.py
new file mode 100644
index 000000000..5e420aa8e
--- /dev/null
+++ b/nova/cells/rpc_driver.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells RPC Communication Driver
+"""
+from nova.cells import driver
+from nova.openstack.common import cfg
+from nova.openstack.common import rpc
+from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
+from nova.openstack.common.rpc import proxy as rpc_proxy
+
+cell_rpc_driver_opts = [
+ cfg.StrOpt('rpc_driver_queue_base',
+ default='cells.intercell',
+ help="Base queue name to use when communicating between "
+ "cells. Various topics by message type will be "
+ "appended to this.")]
+
+CONF = cfg.CONF
+CONF.register_opts(cell_rpc_driver_opts, group='cells')
+CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
+
+_CELL_TO_CELL_RPC_API_VERSION = '1.0'
+
+
+class CellsRPCDriver(driver.BaseCellsDriver):
+ """Driver for cell<->cell communication via RPC. This is used to
+    set up the RPC consumers as well as to send a message to another cell.
+
+ One instance of this class will be created for every neighbor cell
+ that we find in the DB and it will be associated with the cell in
+ its CellState.
+
+ One instance is also created by the cells manager for setting up
+ the consumers.
+ """
+ BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION
+
+ def __init__(self, *args, **kwargs):
+ super(CellsRPCDriver, self).__init__(*args, **kwargs)
+ self.rpc_connections = []
+ self.intercell_rpcapi = InterCellRPCAPI(
+ self.BASE_RPC_API_VERSION)
+
+ def _start_consumer(self, dispatcher, topic):
+ """Start an RPC consumer."""
+ conn = rpc.create_connection(new=True)
+ conn.create_consumer(topic, dispatcher, fanout=False)
+ conn.create_consumer(topic, dispatcher, fanout=True)
+ self.rpc_connections.append(conn)
+ conn.consume_in_thread()
+ return conn
+
+ def start_consumers(self, msg_runner):
+ """Start RPC consumers.
+
+ Start up 2 separate consumers for handling inter-cell
+ communication via RPC. Both handle the same types of
+ messages, but requests/replies are separated to solve
+ potential deadlocks. (If we used the same queue for both,
+ it's possible to exhaust the RPC thread pool while we wait
+ for replies.. such that we'd never consume a reply.)
+ """
+ topic_base = CONF.cells.rpc_driver_queue_base
+ proxy_manager = InterCellRPCDispatcher(msg_runner)
+ dispatcher = rpc_dispatcher.RpcDispatcher([proxy_manager])
+ for msg_type in msg_runner.get_message_types():
+ topic = '%s.%s' % (topic_base, msg_type)
+ self._start_consumer(dispatcher, topic)
+
+ def stop_consumers(self):
+ """Stop RPC consumers.
+
+ NOTE: Currently there's no hooks when stopping services
+ to have managers cleanup, so this is not currently called.
+ """
+ for conn in self.rpc_connections:
+ conn.close()
+
+ def send_message_to_cell(self, cell_state, message):
+ """Use the IntercellRPCAPI to send a message to a cell."""
+ self.intercell_rpcapi.send_message_to_cell(cell_state, message)
+
+
+class InterCellRPCAPI(rpc_proxy.RpcProxy):
+ """Client side of the Cell<->Cell RPC API.
+
+ The CellsRPCDriver uses this to make calls to another cell.
+
+ API version history:
+ 1.0 - Initial version.
+ """
+ def __init__(self, default_version):
+ super(InterCellRPCAPI, self).__init__(None, default_version)
+
+ @staticmethod
+ def _get_server_params_for_cell(next_hop):
+ """Turn the DB information for a cell into the parameters
+ needed for the RPC call.
+ """
+ param_map = {'username': 'username',
+ 'password': 'password',
+ 'rpc_host': 'hostname',
+ 'rpc_port': 'port',
+ 'rpc_virtual_host': 'virtual_host'}
+ server_params = {}
+ for source, target in param_map.items():
+ if next_hop.db_info[source]:
+ server_params[target] = next_hop.db_info[source]
+ return server_params
+
+ def send_message_to_cell(self, cell_state, message):
+ """Send a message to another cell by JSON-ifying the message and
+ making an RPC cast to 'process_message'. If the message says to
+ fanout, do it. The topic that is used will be
+ 'CONF.rpc_driver_queue_base.<message_type>'.
+ """
+ ctxt = message.ctxt
+ json_message = message.to_json()
+ rpc_message = self.make_msg('process_message', message=json_message)
+ topic_base = CONF.cells.rpc_driver_queue_base
+ topic = '%s.%s' % (topic_base, message.message_type)
+ server_params = self._get_server_params_for_cell(cell_state)
+ if message.fanout:
+ self.fanout_cast_to_server(ctxt, server_params,
+ rpc_message, topic=topic)
+ else:
+ self.cast_to_server(ctxt, server_params,
+ rpc_message, topic=topic)
+
+
+class InterCellRPCDispatcher(object):
+ """RPC Dispatcher to handle messages received from other cells.
+
+ All messages received here have come from a sibling cell. Depending
+ on the ultimate target and type of message, we may process the message
+ in this cell, relay the message to another sibling cell, or both. This
+ logic is defined by the message class in the messaging module.
+ """
+ BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION
+
+ def __init__(self, msg_runner):
+ """Init the Intercell RPC Dispatcher."""
+ self.msg_runner = msg_runner
+
+ def process_message(self, _ctxt, message):
+ """We received a message from another cell. Use the MessageRunner
+ to turn this from JSON back into an instance of the correct
+ Message class. Then process it!
+ """
+ message = self.msg_runner.message_from_json(message)
+ message.process()
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
new file mode 100644
index 000000000..0ab4fc352
--- /dev/null
+++ b/nova/cells/rpcapi.py
@@ -0,0 +1,157 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Client side of nova-cells RPC API (for talking to the nova-cells service
+within a cell).
+
+This is different than communication between child and parent nova-cells
+services. That communication is handled by the cells driver via the
+messaging module.
+"""
+
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import proxy as rpc_proxy
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+CONF.import_opt('topic', 'nova.cells.opts', group='cells')
+
+
+class CellsAPI(rpc_proxy.RpcProxy):
+ '''Cells client-side RPC API
+
+ API version history:
+
+ 1.0 - Initial version.
+ 1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
+ '''
+ BASE_RPC_API_VERSION = '1.0'
+
+ def __init__(self):
+ super(CellsAPI, self).__init__(topic=CONF.cells.topic,
+ default_version=self.BASE_RPC_API_VERSION)
+
+ def cast_compute_api_method(self, ctxt, cell_name, method,
+ *args, **kwargs):
+ """Make a cast to a compute API method in a certain cell."""
+ method_info = {'method': method,
+ 'method_args': args,
+ 'method_kwargs': kwargs}
+ self.cast(ctxt, self.make_msg('run_compute_api_method',
+ cell_name=cell_name,
+ method_info=method_info,
+ call=False))
+
+ def call_compute_api_method(self, ctxt, cell_name, method,
+ *args, **kwargs):
+ """Make a call to a compute API method in a certain cell."""
+ method_info = {'method': method,
+ 'method_args': args,
+ 'method_kwargs': kwargs}
+ return self.call(ctxt, self.make_msg('run_compute_api_method',
+ cell_name=cell_name,
+ method_info=method_info,
+ call=True))
+
+ def schedule_run_instance(self, ctxt, **kwargs):
+ """Schedule a new instance for creation."""
+ self.cast(ctxt, self.make_msg('schedule_run_instance',
+ host_sched_kwargs=kwargs))
+
+ def instance_update_at_top(self, ctxt, instance):
+ """Update instance at API level."""
+ if not CONF.cells.enable:
+ return
+ # Make sure we have a dict, not a SQLAlchemy model
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('instance_update_at_top',
+ instance=instance_p))
+
+ def instance_destroy_at_top(self, ctxt, instance):
+ """Destroy instance at API level."""
+ if not CONF.cells.enable:
+ return
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('instance_destroy_at_top',
+ instance=instance_p))
+
+ def instance_delete_everywhere(self, ctxt, instance, delete_type):
+ """Delete instance everywhere. delete_type may be 'soft'
+ or 'hard'. This is generally only used to resolve races
+ when API cell doesn't know to what cell an instance belongs.
+ """
+ if not CONF.cells.enable:
+ return
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('instance_delete_everywhere',
+ instance=instance_p,
+ delete_type=delete_type))
+
+ def instance_fault_create_at_top(self, ctxt, instance_fault):
+ """Create an instance fault at the top."""
+ if not CONF.cells.enable:
+ return
+ instance_fault_p = jsonutils.to_primitive(instance_fault)
+ self.cast(ctxt, self.make_msg('instance_fault_create_at_top',
+ instance_fault=instance_fault_p))
+
+ def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period,
+ bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None):
+ """Broadcast upwards that bw_usage was updated."""
+ if not CONF.cells.enable:
+ return
+ bw_update_info = {'uuid': uuid,
+ 'mac': mac,
+ 'start_period': start_period,
+ 'bw_in': bw_in,
+ 'bw_out': bw_out,
+ 'last_ctr_in': last_ctr_in,
+ 'last_ctr_out': last_ctr_out,
+ 'last_refreshed': last_refreshed}
+ self.cast(ctxt, self.make_msg('bw_usage_update_at_top',
+ bw_update_info=bw_update_info))
+
+ def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
+ """Broadcast up that an instance's info_cache has changed."""
+ if not CONF.cells.enable:
+ return
+ iicache = jsonutils.to_primitive(instance_info_cache)
+ instance = {'uuid': iicache['instance_uuid'],
+ 'info_cache': iicache}
+ self.cast(ctxt, self.make_msg('instance_update_at_top',
+ instance=instance))
+
+ def get_cell_info_for_neighbors(self, ctxt):
+ """Get information about our neighbor cells from the manager."""
+ if not CONF.cells.enable:
+ return []
+ return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'),
+ version='1.1')
+
+ def sync_instances(self, ctxt, project_id=None, updated_since=None,
+ deleted=False):
+ """Ask all cells to sync instance data."""
+ if not CONF.cells.enable:
+ return
+ return self.cast(ctxt, self.make_msg('sync_instances',
+ project_id=project_id,
+ updated_since=updated_since,
+ deleted=deleted),
+ version='1.1')
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
new file mode 100644
index 000000000..0b730290a
--- /dev/null
+++ b/nova/cells/scheduler.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells Scheduler
+"""
+import random
+import time
+
+from nova import compute
+from nova.compute import vm_states
+from nova.db import base
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.scheduler import rpcapi as scheduler_rpcapi
+
+cell_scheduler_opts = [
+ cfg.IntOpt('scheduler_retries',
+ default=10,
+ help='How many retries when no cells are available.'),
+ cfg.IntOpt('scheduler_retry_delay',
+ default=2,
+ help='How often to retry in seconds when no cells are '
+ 'available.')
+]
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.register_opts(cell_scheduler_opts, group='cells')
+
+
+class CellsScheduler(base.Base):
+ """The cells scheduler."""
+
+ def __init__(self, msg_runner):
+ super(CellsScheduler, self).__init__()
+ self.msg_runner = msg_runner
+ self.state_manager = msg_runner.state_manager
+ self.compute_api = compute.API()
+ self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+
+ def _create_instances_here(self, ctxt, request_spec):
+ instance_values = request_spec['instance_properties']
+ for instance_uuid in request_spec['instance_uuids']:
+ instance_values['uuid'] = instance_uuid
+ instance = self.compute_api.create_db_entry_for_new_instance(
+ ctxt,
+ request_spec['instance_type'],
+ request_spec['image'],
+ instance_values,
+ request_spec['security_group'],
+ request_spec['block_device_mapping'])
+ self.msg_runner.instance_update_at_top(ctxt, instance)
+
+ def _get_possible_cells(self):
+ cells = set(self.state_manager.get_child_cells())
+ our_cell = self.state_manager.get_my_state()
+ # Include our cell in the list, if we have any capacity info
+ if not cells or our_cell.capacities:
+ cells.add(our_cell)
+ return cells
+
+ def _run_instance(self, message, host_sched_kwargs):
+ """Attempt to schedule instance(s). If we have no cells
+ to try, raise exception.NoCellsAvailable
+ """
+ ctxt = message.ctxt
+ request_spec = host_sched_kwargs['request_spec']
+
+ # The message we might forward to a child cell
+ cells = self._get_possible_cells()
+ if not cells:
+ raise exception.NoCellsAvailable()
+ cells = list(cells)
+
+ # Random selection for now
+ random.shuffle(cells)
+ target_cell = cells[0]
+
+ LOG.debug(_("Scheduling with routing_path=%(routing_path)s"),
+ locals())
+
+ if target_cell.is_me:
+ # Need to create instance DB entries as the host scheduler
+ # expects that the instance(s) already exists.
+ self._create_instances_here(ctxt, request_spec)
+ self.scheduler_rpcapi.run_instance(ctxt,
+ **host_sched_kwargs)
+ return
+ self.msg_runner.schedule_run_instance(ctxt, target_cell,
+ host_sched_kwargs)
+
+ def run_instance(self, message, host_sched_kwargs):
+ """Pick a cell where we should create a new instance."""
+ try:
+ for i in xrange(max(0, CONF.cells.scheduler_retries) + 1):
+ try:
+ return self._run_instance(message, host_sched_kwargs)
+ except exception.NoCellsAvailable:
+ if i == max(0, CONF.cells.scheduler_retries):
+ raise
+ sleep_time = max(1, CONF.cells.scheduler_retry_delay)
+ LOG.info(_("No cells available when scheduling. Will "
+ "retry in %(sleep_time)s second(s)"), locals())
+ time.sleep(sleep_time)
+ continue
+ except Exception:
+ request_spec = host_sched_kwargs['request_spec']
+ instance_uuids = request_spec['instance_uuids']
+ LOG.exception(_("Error scheduling instances %(instance_uuids)s"),
+ locals())
+ ctxt = message.ctxt
+ for instance_uuid in instance_uuids:
+ self.msg_runner.instance_update_at_top(ctxt,
+ {'uuid': instance_uuid,
+ 'vm_state': vm_states.ERROR})
+ try:
+ self.db.instance_update(ctxt,
+ instance_uuid,
+ {'vm_state': vm_states.ERROR})
+ except Exception:
+ pass
diff --git a/nova/cells/state.py b/nova/cells/state.py
new file mode 100644
index 000000000..e3886bedb
--- /dev/null
+++ b/nova/cells/state.py
@@ -0,0 +1,354 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+CellState Manager
+"""
+import copy
+import datetime
+import functools
+
+from nova.cells import rpc_driver
+from nova import context
+from nova.db import base
+from nova.openstack.common import cfg
+from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+
+cell_state_manager_opts = [
+ cfg.IntOpt('db_check_interval',
+ default=60,
+ help='Seconds between getting fresh cell info from db.'),
+]
+
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
+CONF.register_opts(cell_state_manager_opts, group='cells')
+
+
+class CellState(object):
+ """Holds information for a particular cell."""
+ def __init__(self, cell_name, is_me=False):
+ self.name = cell_name
+ self.is_me = is_me
+ self.last_seen = datetime.datetime.min
+ self.capabilities = {}
+ self.capacities = {}
+ self.db_info = {}
+ # TODO(comstud): The DB will specify the driver to use to talk
+ # to this cell, but there's no column for this yet. The only
+ # available driver is the rpc driver.
+ self.driver = rpc_driver.CellsRPCDriver()
+
+ def update_db_info(self, cell_db_info):
+ """Update cell credentials from db."""
+ self.db_info = dict(
+ [(k, v) for k, v in cell_db_info.iteritems()
+ if k != 'name'])
+
+ def update_capabilities(self, cell_metadata):
+ """Update cell capabilities for a cell."""
+ self.last_seen = timeutils.utcnow()
+ self.capabilities = cell_metadata
+
+ def update_capacities(self, capacities):
+ """Update capacity information for a cell."""
+ self.last_seen = timeutils.utcnow()
+ self.capacities = capacities
+
+ def get_cell_info(self):
+ """Return subset of cell information for OS API use."""
+ db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset',
+ 'username', 'rpc_host', 'rpc_port']
+ cell_info = dict(name=self.name, capabilities=self.capabilities)
+ if self.db_info:
+ for field in db_fields_to_return:
+ cell_info[field] = self.db_info[field]
+ return cell_info
+
+ def send_message(self, message):
+ """Send a message to a cell. Just forward this to the driver,
+ passing ourselves and the message as arguments.
+ """
+ self.driver.send_message_to_cell(self, message)
+
+ def __repr__(self):
+ me = "me" if self.is_me else "not_me"
+ return "Cell '%s' (%s)" % (self.name, me)
+
+
+def sync_from_db(f):
+ """Use as a decorator to wrap methods that use cell information to
+ make sure they sync the latest information from the DB periodically.
+ """
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if self._time_to_sync():
+ self._cell_db_sync()
+ return f(self, *args, **kwargs)
+ return wrapper
+
+
+class CellStateManager(base.Base):
+ def __init__(self, cell_state_cls=None):
+ super(CellStateManager, self).__init__()
+ if not cell_state_cls:
+ cell_state_cls = CellState
+ self.cell_state_cls = cell_state_cls
+ self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
+ self.parent_cells = {}
+ self.child_cells = {}
+ self.last_cell_db_check = datetime.datetime.min
+ self._cell_db_sync()
+ my_cell_capabs = {}
+ for cap in CONF.cells.capabilities:
+ name, value = cap.split('=', 1)
+ if ';' in value:
+ values = set(value.split(';'))
+ else:
+ values = set([value])
+ my_cell_capabs[name] = values
+ self.my_cell_state.update_capabilities(my_cell_capabs)
+
+ def _refresh_cells_from_db(self, ctxt):
+ """Make our cell info map match the db."""
+ # Add/update existing cells ...
+ db_cells = self.db.cell_get_all(ctxt)
+ db_cells_dict = dict([(cell['name'], cell) for cell in db_cells])
+
+ # Update current cells. Delete ones that disappeared
+ for cells_dict in (self.parent_cells, self.child_cells):
+ for cell_name, cell_info in cells_dict.items():
+ is_parent = cell_info.db_info['is_parent']
+ db_dict = db_cells_dict.get(cell_name)
+ if db_dict and is_parent == db_dict['is_parent']:
+ cell_info.update_db_info(db_dict)
+ else:
+ del cells_dict[cell_name]
+
+ # Add new cells
+ for cell_name, db_info in db_cells_dict.items():
+ if db_info['is_parent']:
+ cells_dict = self.parent_cells
+ else:
+ cells_dict = self.child_cells
+ if cell_name not in cells_dict:
+ cells_dict[cell_name] = self.cell_state_cls(cell_name)
+ cells_dict[cell_name].update_db_info(db_info)
+
+ def _time_to_sync(self):
+ """Is it time to sync the DB against our memory cache?"""
+ diff = timeutils.utcnow() - self.last_cell_db_check
+ return diff.seconds >= CONF.cells.db_check_interval
+
+ def _update_our_capacity(self, context):
+ """Update our capacity in the self.my_cell_state CellState.
+
+ This will add/update 2 entries in our CellState.capacities,
+ 'ram_free' and 'disk_free'.
+
+ The values of these are both dictionaries with the following
+ format:
+
+ {'total_mb': <total_memory_free_in_the_cell>,
+         'units_by_mb': <units_dictionary>}
+
+ <units_dictionary> contains the number of units that we can
+ build for every instance_type that we have. This number is
+ computed by looking at room available on every compute_node.
+
+ Take the following instance_types as an example:
+
+ [{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
+ {'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
+
+ capacities['ram_free']['units_by_mb'] would contain the following:
+
+ {'1024': <number_of_instances_that_will_fit>,
+ '2048': <number_of_instances_that_will_fit>}
+
+ capacities['disk_free']['units_by_mb'] would contain the following:
+
+ {'122880': <number_of_instances_that_will_fit>,
+ '225280': <number_of_instances_that_will_fit>}
+
+ Units are in MB, so 122880 = (10 + 100) * 1024.
+
+ NOTE(comstud): Perhaps we should only report a single number
+ available per instance_type.
+ """
+
+ compute_hosts = {}
+
+ def _get_compute_hosts():
+ compute_nodes = self.db.compute_node_get_all(context)
+ for compute in compute_nodes:
+ service = compute['service']
+ if not service or service['disabled']:
+ continue
+ host = service['host']
+ compute_hosts[host] = {
+ 'free_ram_mb': compute['free_ram_mb'],
+ 'free_disk_mb': compute['free_disk_gb'] * 1024}
+
+ _get_compute_hosts()
+ if not compute_hosts:
+ self.my_cell_state.update_capacities({})
+ return
+
+ ram_mb_free_units = {}
+ disk_mb_free_units = {}
+ total_ram_mb_free = 0
+ total_disk_mb_free = 0
+
+ def _free_units(tot, per_inst):
+ if per_inst:
+ return max(0, int(tot / per_inst))
+ else:
+ return 0
+
+ def _update_from_values(values, instance_type):
+ memory_mb = instance_type['memory_mb']
+ disk_mb = (instance_type['root_gb'] +
+ instance_type['ephemeral_gb']) * 1024
+ ram_mb_free_units.setdefault(str(memory_mb), 0)
+ disk_mb_free_units.setdefault(str(disk_mb), 0)
+ ram_free_units = _free_units(compute_values['free_ram_mb'],
+ memory_mb)
+ disk_free_units = _free_units(compute_values['free_disk_mb'],
+ disk_mb)
+ ram_mb_free_units[str(memory_mb)] += ram_free_units
+ disk_mb_free_units[str(disk_mb)] += disk_free_units
+
+ instance_types = self.db.instance_type_get_all(context)
+
+ for compute_values in compute_hosts.values():
+ total_ram_mb_free += compute_values['free_ram_mb']
+ total_disk_mb_free += compute_values['free_disk_mb']
+ for instance_type in instance_types:
+ _update_from_values(compute_values, instance_type)
+
+ capacities = {'ram_free': {'total_mb': total_ram_mb_free,
+ 'units_by_mb': ram_mb_free_units},
+ 'disk_free': {'total_mb': total_disk_mb_free,
+ 'units_by_mb': disk_mb_free_units}}
+ self.my_cell_state.update_capacities(capacities)
+
+ @lockutils.synchronized('cell-db-sync', 'nova-')
+ def _cell_db_sync(self):
+ """Update status for all cells if it's time. Most calls to
+ this are from the check_for_update() decorator that checks
+ the time, but it checks outside of a lock. The duplicate
+ check here is to prevent multiple threads from pulling the
+ information simultaneously.
+ """
+ if self._time_to_sync():
+ LOG.debug(_("Updating cell cache from db."))
+ self.last_cell_db_check = timeutils.utcnow()
+ ctxt = context.get_admin_context()
+ self._refresh_cells_from_db(ctxt)
+ self._update_our_capacity(ctxt)
+
+ @sync_from_db
+ def get_cell_info_for_neighbors(self):
+ """Return cell information for all neighbor cells."""
+ cell_list = [cell.get_cell_info()
+ for cell in self.child_cells.itervalues()]
+ cell_list.extend([cell.get_cell_info()
+ for cell in self.parent_cells.itervalues()])
+ return cell_list
+
+ @sync_from_db
+ def get_my_state(self):
+ """Return information for my (this) cell."""
+ return self.my_cell_state
+
+ @sync_from_db
+ def get_child_cells(self):
+ """Return list of child cell_infos."""
+ return self.child_cells.values()
+
+ @sync_from_db
+ def get_parent_cells(self):
+ """Return list of parent cell_infos."""
+ return self.parent_cells.values()
+
+ @sync_from_db
+ def get_parent_cell(self, cell_name):
+ return self.parent_cells.get(cell_name)
+
+ @sync_from_db
+ def get_child_cell(self, cell_name):
+ return self.child_cells.get(cell_name)
+
+ @sync_from_db
+ def update_cell_capabilities(self, cell_name, capabilities):
+ """Update capabilities for a cell."""
+ cell = self.child_cells.get(cell_name)
+ if not cell:
+ cell = self.parent_cells.get(cell_name)
+ if not cell:
+ LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
+ "update capabilities"), locals())
+ return
+ # Make sure capabilities are sets.
+ for capab_name, values in capabilities.items():
+ capabilities[capab_name] = set(values)
+ cell.update_capabilities(capabilities)
+
+ @sync_from_db
+ def update_cell_capacities(self, cell_name, capacities):
+ """Update capacities for a cell."""
+ cell = self.child_cells.get(cell_name)
+ if not cell:
+ cell = self.parent_cells.get(cell_name)
+ if not cell:
+ LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
+ "update capacities"), locals())
+ return
+ cell.update_capacities(capacities)
+
+ @sync_from_db
+ def get_our_capabilities(self, include_children=True):
+ capabs = copy.deepcopy(self.my_cell_state.capabilities)
+ if include_children:
+ for cell in self.child_cells.values():
+ for capab_name, values in cell.capabilities.items():
+ if capab_name not in capabs:
+ capabs[capab_name] = set([])
+ capabs[capab_name] |= values
+ return capabs
+
+ def _add_to_dict(self, target, src):
+ for key, value in src.items():
+ if isinstance(value, dict):
+ target.setdefault(key, {})
+ self._add_to_dict(target[key], value)
+ continue
+ target.setdefault(key, 0)
+ target[key] += value
+
+ @sync_from_db
+ def get_our_capacities(self, include_children=True):
+ capacities = copy.deepcopy(self.my_cell_state.capacities)
+ if include_children:
+ for cell in self.child_cells.values():
+ self._add_to_dict(capacities, cell.capacities)
+ return capacities
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
new file mode 100644
index 000000000..d25f98fab
--- /dev/null
+++ b/nova/cells/utils.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Cells Utility Methods
+"""
+import random
+
+from nova import db
+
+
+def get_instances_to_sync(context, updated_since=None, project_id=None,
+ deleted=True, shuffle=False, uuids_only=False):
+ """Return a generator that will return a list of active and
+ deleted instances to sync with parent cells. The list may
+ optionally be shuffled for periodic updates so that multiple
+ cells services aren't self-healing the same instances in nearly
+ lockstep.
+ """
+ filters = {}
+ if updated_since is not None:
+ filters['changes-since'] = updated_since
+ if project_id is not None:
+ filters['project_id'] = project_id
+ if not deleted:
+ filters['deleted'] = False
+ # Active instances first.
+ instances = db.instance_get_all_by_filters(
+ context, filters, 'deleted', 'asc')
+ if shuffle:
+ random.shuffle(instances)
+ for instance in instances:
+ if uuids_only:
+ yield instance['uuid']
+ else:
+ yield instance
diff --git a/nova/cert/manager.py b/nova/cert/manager.py
index d23a15450..d1ffbd5a7 100644
--- a/nova/cert/manager.py
+++ b/nova/cert/manager.py
@@ -27,7 +27,6 @@ Cert manager manages x509 certificates.
import base64
from nova import crypto
-from nova import flags
from nova import manager
from nova.openstack.common import log as logging
@@ -35,7 +34,7 @@ LOG = logging.getLogger(__name__)
class CertManager(manager.Manager):
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def init_host(self):
crypto.ensure_ca_filesystem()
@@ -53,17 +52,20 @@ class CertManager(manager.Manager):
return crypto.revoke_certs_by_user_and_project(user_id, project_id)
def generate_x509_cert(self, context, user_id, project_id):
- """Generate and sign a cert for user in project"""
+ """Generate and sign a cert for user in project."""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
- """Get root ca for a project"""
+ """Get root ca for a project."""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
- """Get crl for a project"""
+ """Get crl for a project."""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/cert/rpcapi.py b/nova/cert/rpcapi.py
index f6ba11579..fdaa327cb 100644
--- a/nova/cert/rpcapi.py
+++ b/nova/cert/rpcapi.py
@@ -18,11 +18,17 @@
Client side of the cert manager RPC API.
"""
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
import nova.openstack.common.rpc.proxy
-CONF = config.CONF
+rpcapi_opts = [
+ cfg.StrOpt('cert_topic',
+ default='cert',
+ help='the topic cert nodes listen on'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -31,6 +37,7 @@ class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -78,3 +85,7 @@ class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
return self.call(ctxt, self.make_msg('decrypt_text',
project_id=project_id,
text=text))
+
+ def get_backdoor_port(self, context, host):
+ return self.call(context, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 414954670..cb7fb8bac 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -28,23 +28,25 @@ import zipfile
from nova import compute
from nova.compute import instance_types
-from nova import config
from nova import crypto
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
cloudpipe_opts = [
+ cfg.StrOpt('vpn_image_id',
+ default='0',
+ help='image id used when starting up a cloudpipe vpn server'),
cfg.StrOpt('vpn_instance_type',
default='m1.tiny',
help=_('Instance type for vpn instances')),
cfg.StrOpt('boot_script_template',
- default='$pybasedir/nova/cloudpipe/bootscript.template',
+ default=paths.basedir_def('nova/cloudpipe/bootscript.template'),
help=_('Template for cloudpipe instance boot script')),
cfg.StrOpt('dmz_net',
default='10.0.0.0',
@@ -52,15 +54,39 @@ cloudpipe_opts = [
cfg.StrOpt('dmz_mask',
default='255.255.255.0',
help=_('Netmask to push into openvpn config')),
+ cfg.StrOpt('vpn_key_suffix',
+ default='-vpn',
+ help='Suffix to add to project name for vpn key and secgroups'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(cloudpipe_opts)
-CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
LOG = logging.getLogger(__name__)
+def is_vpn_image(image_id):
+ return image_id == CONF.vpn_image_id
+
+
+def _load_boot_script():
+ shellfile = open(CONF.boot_script_template, "r")
+ try:
+ s = string.Template(shellfile.read())
+ finally:
+ shellfile.close()
+
+ CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
+ CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
+ CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
+
+ return s.substitute(cc_dmz=CONF.ec2_dmz_host,
+ cc_port=CONF.ec2_port,
+ dmz_net=CONF.dmz_net,
+ dmz_mask=CONF.dmz_mask,
+ num_vpn=CONF.cnt_vpn_clients)
+
+
class CloudPipe(object):
def __init__(self):
self.compute_api = compute.API()
@@ -71,14 +97,7 @@ class CloudPipe(object):
filename = "payload.zip"
zippath = os.path.join(tmpdir, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
- shellfile = open(CONF.boot_script_template, "r")
- s = string.Template(shellfile.read())
- shellfile.close()
- boot_script = s.substitute(cc_dmz=CONF.ec2_dmz_host,
- cc_port=CONF.ec2_port,
- dmz_net=CONF.dmz_net,
- dmz_mask=CONF.dmz_mask,
- num_vpn=CONF.cnt_vpn_clients)
+ boot_script = _load_boot_script()
# genvpn, sign csr
crypto.generate_vpn_files(project_id)
z.writestr('autorun.sh', boot_script)
diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py
index 502f83381..f77b3f51a 100644
--- a/nova/common/memorycache.py
+++ b/nova/common/memorycache.py
@@ -18,8 +18,28 @@
"""Super simple fake memcache client."""
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
+memcache_opts = [
+ cfg.ListOpt('memcached_servers',
+ default=None,
+ help='Memcached servers or None for in process cache.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(memcache_opts)
+
+
+def get_client():
+ client_cls = Client
+
+ if CONF.memcached_servers:
+ import memcache
+ client_cls = memcache.Client
+
+ return client_cls(CONF.memcached_servers, debug=0)
+
class Client(object):
"""Replicates a tiny subset of memcached client interface."""
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index b770778aa..d1f8cc16c 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -18,11 +18,32 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.compute import <foo>' elsewhere.
-import nova.flags
+import nova.openstack.common.cfg
import nova.openstack.common.importutils
+_compute_opts = [
+ nova.openstack.common.cfg.StrOpt('compute_api_class',
+ default='nova.compute.api.API',
+ help='The full class name of the '
+ 'compute API class to use'),
+]
+
+nova.openstack.common.cfg.CONF.register_opts(_compute_opts)
+
def API(*args, **kwargs):
importutils = nova.openstack.common.importutils
- cls = importutils.import_class(nova.flags.FLAGS.compute_api_class)
- return cls(*args, **kwargs)
+ class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ return importutils.import_object(class_name, *args, **kwargs)
+
+
+def HostAPI(*args, **kwargs):
+ """
+ Returns the 'HostAPI' class from the same module as the configured
+ compute API.
+ """
+ importutils = nova.openstack.common.importutils
+ compute_api_class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ compute_api_class = importutils.import_class(compute_api_class_name)
+ class_name = compute_api_class.__module__ + ".HostAPI"
+ return importutils.import_object(class_name, *args, **kwargs)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9ee384393..8ba6b97aa 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -27,7 +27,9 @@ import re
import string
import time
import urllib
+import uuid
+from nova import availability_zones
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
@@ -35,15 +37,15 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
-from nova import flags
+from nova import hooks
from nova.image import glance
from nova import network
from nova import notifications
+from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -53,15 +55,44 @@ from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-CONF = config.CONF
+compute_opts = [
+ cfg.BoolOpt('allow_resize_to_same_host',
+ default=False,
+ help='Allow destination machine to match source for resize. '
+ 'Useful when testing in single-host environments.'),
+ cfg.StrOpt('default_schedule_zone',
+ default=None,
+ help='availability zone to use when user doesn\'t specify one'),
+ cfg.ListOpt('non_inheritable_image_properties',
+ default=['cache_in_nova',
+ 'bittorrent'],
+ help='These are image properties which a snapshot should not'
+ ' inherit from an instance'),
+ cfg.StrOpt('null_kernel',
+ default='nokernel',
+ help='kernel image that indicates not to use a kernel, but to '
+ 'use a raw disk image instead'),
+ cfg.StrOpt('security_group_handler',
+ default='nova.network.sg.NullSecurityGroupHandler',
+ help='The full class name of the security group handler class'),
+ cfg.StrOpt('security_group_api',
+ default='nova.compute.api.SecurityGroupAPI',
+ help='The full class name of the security API class'),
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(compute_opts)
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
@@ -110,7 +141,7 @@ def check_instance_lock(function):
def policy_decorator(scope):
- """Check corresponding policy prior of wrapped method to execution"""
+ """Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
@@ -121,7 +152,7 @@ def policy_decorator(scope):
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
- scope='compute:security_groups')
+ scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
@@ -139,11 +170,15 @@ class API(base.Base):
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
- self.security_group_api = security_group_api or SecurityGroupAPI()
- self.sgh = importutils.import_object(FLAGS.security_group_handler)
+ self.security_group_api = (security_group_api or
+ importutils.import_object(
+ CONF.security_group_api))
+ self.sgh = importutils.import_object(CONF.security_group_handler)
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ self.servicegroup_api = servicegroup.API()
+
super(API, self).__init__(**kwargs)
def _instance_update(self, context, instance_uuid, **kwargs):
@@ -280,9 +315,10 @@ class API(base.Base):
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_networks(self, context, requested_networks):
- """ Check if the networks requested belongs to the project
- and the fixed IP address for each network provided is within
- same the network block
+ """
+ Check if the networks requested belong to the project
+ and the fixed IP address for each network provided is within
+ the same network block
"""
if not requested_networks:
return
@@ -290,8 +326,7 @@ class API(base.Base):
self.network_api.validate_networks(context, requested_networks)
@staticmethod
- def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image,
- image_service):
+ def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
@@ -303,22 +338,28 @@ class API(base.Base):
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
+ image_properties = image.get('properties', {})
+
if kernel_id is None:
- kernel_id = image['properties'].get('kernel_id')
+ kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
- ramdisk_id = image['properties'].get('ramdisk_id')
+ ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
- if kernel_id == str(FLAGS.null_kernel):
+ if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
+ image_service, kernel_id = glance.get_remote_image_service(
+ context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
+ image_service, ramdisk_id = glance.get_remote_image_service(
+ context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@@ -334,15 +375,17 @@ class API(base.Base):
availability_zone, forced_host = availability_zone.split(':')
if not availability_zone:
- availability_zone = FLAGS.default_schedule_zone
+ availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host
@staticmethod
def _inherit_properties_from_image(image, auto_disk_config):
+ image_properties = image.get('properties', {})
+
def prop(prop_, prop_type=None):
"""Return the value of an image property."""
- value = image['properties'].get(prop_)
+ value = image_properties.get(prop_)
if value is not None:
if prop_type == 'bool':
@@ -351,7 +394,7 @@ class API(base.Base):
return value
options_from_image = {'os_type': prop('os_type'),
- 'architecture': prop('arch'),
+ 'architecture': prop('architecture'),
'vm_mode': prop('vm_mode')}
# If instance doesn't have auto_disk_config overridden by request, use
@@ -362,20 +405,20 @@ class API(base.Base):
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
- def _create_instance(self, context, instance_type,
- image_href, kernel_id, ramdisk_id,
- min_count, max_count,
- display_name, display_description,
- key_name, key_data, security_group,
- availability_zone, user_data, metadata,
- injected_files, admin_password,
- access_ip_v4, access_ip_v6,
- requested_networks, config_drive,
- block_device_mapping, auto_disk_config,
- reservation_id=None, scheduler_hints=None):
+ def _validate_and_provision_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data,
+ metadata, injected_files,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping,
+ auto_disk_config, reservation_id,
+ scheduler_hints):
"""Verify all the input parameters regardless of the provisioning
- strategy being performed and schedule the instance(s) for
- creation."""
+ strategy being performed."""
if not metadata:
metadata = {}
@@ -395,6 +438,19 @@ class API(base.Base):
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
+ if user_data:
+ l = len(user_data)
+ if l > MAX_USERDATA_SIZE:
+ # NOTE(mikal): user_data is stored in a text column, and
+ # the database might silently truncate if its over length.
+ raise exception.InstanceUserDataTooLarge(
+ length=l, maxsize=MAX_USERDATA_SIZE)
+
+ try:
+ base64.decodestring(user_data)
+ except base64.binascii.Error:
+ raise exception.InstanceUserDataMalformed()
+
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
@@ -408,43 +464,42 @@ class API(base.Base):
self._check_injected_file_quota(context, injected_files)
self._check_requested_networks(context, requested_networks)
- (image_service, image_id) = glance.get_remote_image_service(
- context, image_href)
- image = image_service.show(context, image_id)
- if image['status'] != 'active':
- raise exception.ImageNotActive(image_id=image_id)
+ if image_href:
+ (image_service, image_id) = glance.get_remote_image_service(
+ context, image_href)
+ image = image_service.show(context, image_id)
+ if image['status'] != 'active':
+ raise exception.ImageNotActive(image_id=image_id)
+ else:
+ image = {}
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
if instance_type['root_gb'] < int(image.get('min_disk') or 0):
raise exception.InstanceTypeDiskTooSmall()
+ kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
+ context, kernel_id, ramdisk_id, image)
+
# Handle config_drive
config_drive_id = None
- if config_drive and config_drive is not True:
+ if config_drive and not utils.is_valid_boolstr(config_drive):
# config_drive is volume id
config_drive_id = config_drive
config_drive = None
# Ensure config_drive image exists
- image_service.show(context, config_drive_id)
-
- kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
- context, kernel_id, ramdisk_id, image, image_service)
+ cd_image_service, config_drive_id = \
+ glance.get_remote_image_service(context, config_drive_id)
+ cd_image_service.show(context, config_drive_id)
if key_data is None and key_name:
key_pair = self.db.key_pair_get(context, context.user_id,
key_name)
key_data = key_pair['public_key']
- if reservation_id is None:
- reservation_id = utils.generate_uid('r')
-
- # grab the architecture from glance
- architecture = image['properties'].get('architecture', 'Unknown')
-
root_device_name = block_device.properties_root_device_name(
- image['properties'])
+ image.get('properties', {}))
availability_zone, forced_host = self._handle_availability_zone(
availability_zone)
@@ -478,22 +533,8 @@ class API(base.Base):
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
- 'architecture': architecture,
'progress': 0}
- if user_data:
- l = len(user_data)
- if l > MAX_USERDATA_SIZE:
- # NOTE(mikal): user_data is stored in a text column, and
- # the database might silently truncate if its over length.
- raise exception.InstanceUserDataTooLarge(
- length=l, maxsize=MAX_USERDATA_SIZE)
-
- try:
- base64.decodestring(user_data)
- except base64.binascii.Error:
- raise exception.InstanceUserDataMalformed()
-
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
@@ -536,6 +577,36 @@ class API(base.Base):
'security_group': security_group,
}
+ return (instances, request_spec, filter_properties)
+
+ def _create_instance(self, context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password,
+ access_ip_v4, access_ip_v6,
+ requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id=None, scheduler_hints=None):
+ """Verify all the input parameters regardless of the provisioning
+ strategy being performed and schedule the instance(s) for
+ creation."""
+
+ if reservation_id is None:
+ reservation_id = utils.generate_uid('r')
+
+ (instances, request_spec, filter_properties) = \
+ self._validate_and_provision_instance(context, instance_type,
+ image_href, kernel_id, ramdisk_id, min_count,
+ max_count, display_name, display_description,
+ key_name, key_data, security_group, availability_zone,
+ user_data, metadata, injected_files, access_ip_v4,
+ access_ip_v6, requested_networks, config_drive,
+ block_device_mapping, auto_disk_config,
+ reservation_id, scheduler_hints)
+
self.scheduler_rpcapi.run_instance(context,
request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
@@ -636,12 +707,13 @@ class API(base.Base):
# require elevated context?
elevated = context.elevated()
instance_uuid = instance['uuid']
- mappings = image['properties'].get('mappings', [])
+ image_properties = image.get('properties', {})
+ mappings = image_properties.get('mappings', [])
if mappings:
self._update_image_block_device_mapping(elevated,
instance_type, instance_uuid, mappings)
- image_bdm = image['properties'].get('block_device_mapping', [])
+ image_bdm = image_properties.get('block_device_mapping', [])
for mapping in (image_bdm, block_device_mapping):
if not mapping:
continue
@@ -651,9 +723,10 @@ class API(base.Base):
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
+ image_properties = image.get('properties', {})
if (block_device_mapping or
- image['properties'].get('mappings') or
- image['properties'].get('block_device_mapping')):
+ image_properties.get('mappings') or
+ image_properties.get('block_device_mapping')):
instance['shutdown_terminate'] = False
def _populate_instance_names(self, instance):
@@ -674,29 +747,29 @@ class API(base.Base):
def _populate_instance_for_create(self, base_options, image,
security_groups):
"""Build the beginning of a new instance."""
+ image_properties = image.get('properties', {})
instance = base_options
if not instance.get('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
- instance['uuid'] = str(utils.gen_uuid())
+ instance['uuid'] = str(uuid.uuid4())
instance['launch_index'] = 0
instance['vm_state'] = vm_states.BUILDING
instance['task_state'] = task_states.SCHEDULING
- instance['architecture'] = image['properties'].get('architecture')
instance['info_cache'] = {'network_info': '[]'}
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
instance.setdefault('system_metadata', {})
- for key, value in image['properties'].iteritems():
+ for key, value in image_properties.iteritems():
new_value = str(value)[:255]
instance['system_metadata']['image_%s' % key] = new_value
# Keep a record of the original base image that this
# image's instance is derived from:
- base_image_ref = image['properties'].get('base_image_ref')
+ base_image_ref = image_properties.get('base_image_ref')
if not base_image_ref:
# base image ref property not previously set through a snapshot.
# default to using the image ref as the base:
@@ -732,6 +805,10 @@ class API(base.Base):
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
+ # ensure_default security group is called before the instance
+ # is created so the creation of the default security group is
+ # proxied to the sgh.
+ self.security_group_api.ensure_default(context)
instance = self.db.instance_create(context, instance)
self._populate_instance_for_bdm(context, instance,
@@ -758,6 +835,7 @@ class API(base.Base):
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
+ @hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
@@ -793,12 +871,26 @@ class API(base.Base):
scheduler_hints=scheduler_hints)
def trigger_provider_fw_rules_refresh(self, context):
- """Called when a rule is added/removed from a provider firewall"""
+ """Called when a rule is added/removed from a provider firewall."""
+
+ host_names = [x['host'] for (x, idx)
+ in self.db.service_get_all_compute_sorted(context)]
+ for host_name in host_names:
+ self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
+
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by the admin_actions API.
- hosts = [x['host'] for (x, idx)
- in self.db.service_get_all_compute_sorted(context)]
- for host in hosts:
- self.compute_rpcapi.refresh_provider_fw_rules(context, host)
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state, eg. 'active'
+ """
+ self.update(context, instance,
+ vm_state=new_state,
+ task_state=None)
@wrap_check_policy
def update(self, context, instance, **kwargs):
@@ -835,6 +927,12 @@ class API(base.Base):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
reservations = None
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
try:
# NOTE(maoy): no expected_task_state needs to be set
attrs = {'progress': 0}
@@ -849,6 +947,7 @@ class API(base.Base):
old['task_state'] not in (task_states.DELETING,
task_states.SOFT_DELETING)):
reservations = QUOTAS.reserve(context,
+ project_id=project_id,
instances=-1,
cores=-instance['vcpus'],
ram=-instance['memory_mb'])
@@ -860,7 +959,9 @@ class API(base.Base):
self.db.instance_destroy(context, instance['uuid'],
constraint)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
return
except exception.ConstraintNotMet:
# Refresh to get new host information
@@ -896,34 +997,38 @@ class API(base.Base):
reservations=downsize_reservations)
is_up = False
- # NOTE(jogo): db allows for multiple compute services per host
try:
- services = self.db.service_get_all_compute_by_host(
+ service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
- except exception.ComputeHostNotFound:
- services = []
- for service in services:
- if utils.service_is_up(service):
+ if self.servicegroup_api.service_is_up(service):
is_up = True
cb(context, instance, bdms)
- break
+ except exception.ComputeHostNotFound:
+ pass
+
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
if reservations:
- QUOTAS.commit(context, reservations)
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
- QUOTAS.rollback(context, reservations)
+ QUOTAS.rollback(context,
+ reservations,
+ project_id=project_id)
def _local_delete(self, context, instance, bdms):
- LOG.warning(_('host for instance is down, deleting from '
- 'database'), instance=instance)
+ LOG.warning(_("instance's host %s is down, deleting from "
+ "database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
compute_utils.notify_about_instance_usage(
@@ -1113,17 +1218,17 @@ class API(base.Base):
if search_opts is None:
search_opts = {}
+ if 'all_tenants' in search_opts:
+ check_policy(context, "get_all_tenants", target)
+
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
- try:
- instance_type = instance_types.get_instance_type_by_flavor_id(
- flavor_id)
- except exception.FlavorNotFound:
- raise ValueError()
+ instance_type = instance_types.get_instance_type_by_flavor_id(
+ flavor_id)
filters['instance_type_id'] = instance_type['id']
@@ -1198,7 +1303,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
- extra_properties=None):
+ extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1208,14 +1313,26 @@ class API(base.Base):
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- recv_meta = self._create_image(context, instance, name, 'backup',
- backup_type=backup_type, rotation=rotation,
- extra_properties=extra_properties)
- return recv_meta
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_BACKUP,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'backup', backup_type=backup_type,
+ rotation=rotation, extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='backup',
+ backup_type=backup_type, rotation=rotation)
+ return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
- def snapshot(self, context, instance, name, extra_properties=None):
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1224,12 +1341,25 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
- return self._create_image(context, instance, name, 'snapshot',
- extra_properties=extra_properties)
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_SNAPSHOT,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'snapshot', extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='snapshot')
+ return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
- """Create snapshot or backup for an instance on this host.
+ """Create new image entry in the image service. This new image
+ will be reserved for the compute manager to upload a snapshot
+ or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1243,29 +1373,6 @@ class API(base.Base):
"""
instance_uuid = instance['uuid']
- if image_type == "snapshot":
- task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == "backup":
- task_state = task_states.IMAGE_BACKUP
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
-
- # change instance state and notify
- old_vm_state = instance["vm_state"]
- old_task_state = instance["task_state"]
-
- self.db.instance_test_and_set(
- context, instance_uuid, 'task_state', [None], task_state)
-
- # NOTE(sirp): `instance_test_and_set` only sets the task-state in the
- # DB, but we also need to set it on the current instance so that the
- # correct value is passed down to the compute manager.
- instance['task_state'] = task_state
-
- notifications.send_update_with_states(context, instance, old_vm_state,
- instance["vm_state"], old_task_state, instance["task_state"],
- service="api", verify_states=True)
-
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
@@ -1305,18 +1412,14 @@ class API(base.Base):
key = key[len(prefix):]
# Skip properties that are non-inheritable
- if key in FLAGS.non_inheritable_image_properties:
+ if key in CONF.non_inheritable_image_properties:
continue
# By using setdefault, we ensure that the properties set
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
- recv_meta = self.image_service.create(context, sent_meta)
- self.compute_rpcapi.snapshot_instance(context, instance=instance,
- image_id=recv_meta['id'], image_type=image_type,
- backup_type=backup_type, rotation=rotation)
- return recv_meta
+ return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
@@ -1448,7 +1551,8 @@ class API(base.Base):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
- state=instance['task_state'])
+ state=instance['task_state'],
+ method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance = self.update(context, instance, vm_state=vm_states.ACTIVE,
@@ -1458,12 +1562,9 @@ class API(base.Base):
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
- network_info = self.network_api.get_instance_nw_info(elevated,
- instance)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
- network_info=network_info,
reboot_type=reboot_type)
def _get_image(self, context, image_href):
@@ -1479,8 +1580,12 @@ class API(base.Base):
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
- orig_image_ref = instance['image_ref']
- image = self._get_image(context, image_href)
+ if instance['image_ref']:
+ orig_image_ref = instance['image_ref']
+ image = self._get_image(context, image_href)
+ else:
+ orig_image_ref = ''
+ image = {}
files_to_inject = kwargs.pop('files_to_inject', [])
self._check_injected_file_quota(context, files_to_inject)
@@ -1494,11 +1599,14 @@ class API(base.Base):
if instance_type['root_gb'] < int(image.get('min_disk') or 0):
raise exception.InstanceTypeDiskTooSmall()
- (image_service, image_id) = glance.get_remote_image_service(context,
- image_href)
- image = image_service.show(context, image_id)
+ if image_href:
+ (image_service, image_id) = glance.get_remote_image_service(
+ context, image_href)
+ image = image_service.show(context, image_id)
+ else:
+ image = {}
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
- context, None, None, image, image_service)
+ context, None, None, image)
def _reset_image_metadata():
"""
@@ -1521,7 +1629,7 @@ class API(base.Base):
if key.startswith('image_'):
del sys_metadata[key]
# Add the new ones
- for key, value in image['properties'].iteritems():
+ for key, value in image.get('properties', {}).iteritems():
new_value = str(value)[:255]
sys_metadata['image_%s' % key] = new_value
self.db.instance_system_metadata_update(context,
@@ -1542,10 +1650,13 @@ class API(base.Base):
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
+ bdms = self.db.block_device_mapping_get_all_by_instance(context,
+ instance['uuid'])
+
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata)
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@@ -1564,13 +1675,18 @@ class API(base.Base):
task_state=task_states.RESIZE_REVERTING,
expected_task_state=None)
+ self.db.migration_update(elevated, migration_ref['id'],
+ {'status': 'reverting'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
- self.db.migration_update(elevated, migration_ref['id'],
- {'status': 'reverted'})
-
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
@@ -1588,14 +1704,19 @@ class API(base.Base):
task_state=None,
expected_task_state=None)
+ self.db.migration_update(elevated, migration_ref['id'],
+ {'status': 'confirming'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['source_compute'],
reservations=reservations)
- self.db.migration_update(elevated, migration_ref['id'],
- {'status': 'confirmed'})
-
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
@@ -1731,7 +1852,7 @@ class API(base.Base):
pid = context.project_id
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
- " tried to resize instance. %(msg)s"), locals())
+ " tried to resize instance."), locals())
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
@@ -1749,9 +1870,15 @@ class API(base.Base):
filter_properties = {'ignore_hosts': []}
- if not FLAGS.allow_resize_to_same_host:
+ if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
args = {
"instance": instance,
"instance_type": new_instance_type,
@@ -1805,6 +1932,10 @@ class API(base.Base):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
+ def get_backdoor_port(self, context, host_name):
+ """Retrieve backdoor port."""
+ return self.compute_rpcapi.get_backdoor_port(context, host_name)
+
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
@@ -1880,7 +2011,7 @@ class API(base.Base):
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
if not instance['host']:
- raise exception.InstanceNotReady(instance=instance)
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
@@ -1891,6 +2022,14 @@ class API(base.Base):
return {'url': connect_info['access_url']}
+ def get_vnc_connect_info(self, context, instance, console_type):
+ """Used in a child cell to get console info."""
+ if not instance['host']:
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
+ connect_info = self.compute_rpcapi.get_vnc_console(context,
+ instance=instance, console_type=console_type)
+ return connect_info
+
@wrap_check_policy
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
@@ -2061,50 +2200,89 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
- disk_over_commit, host):
+ disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
- host, instance=instance)
+ host_name, instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
expected_task_state=None)
self.scheduler_rpcapi.live_migration(context, block_migration,
- disk_over_commit, instance, host)
+ disk_over_commit, instance, host_name)
class HostAPI(base.Base):
- def __init__(self):
- self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ """Sub-set of the Compute Manager API for managing host operations."""
+
+ def __init__(self, rpcapi=None):
+ self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
super(HostAPI, self).__init__()
- """Sub-set of the Compute Manager API for managing host operations."""
- def set_host_enabled(self, context, host, enabled):
+ def _assert_host_exists(self, context, host_name):
+ """Raise HostNotFound if compute host doesn't exist."""
+ if not self.db.service_get_by_host_and_topic(context, host_name,
+ CONF.compute_topic):
+ raise exception.HostNotFound(host=host_name)
+
+ def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.set_host_enabled(context, enabled=enabled,
- host=host)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.set_host_enabled(context, enabled=enabled,
+ host=host_name)
- def get_host_uptime(self, context, host):
+ def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.get_host_uptime(context, host=host)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.get_host_uptime(context, host=host_name)
- def host_power_action(self, context, host, action):
+ def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
- # NOTE(comstud): No instance_uuid argument to this compute manager
- # call
- return self.compute_rpcapi.host_power_action(context, action=action,
- host=host)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_power_action(context, action=action,
+ host=host_name)
- def set_host_maintenance(self, context, host, mode):
+ def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
- return self.compute_rpcapi.host_maintenance_mode(context,
- host_param=host, mode=mode, host=host)
+ self._assert_host_exists(context, host_name)
+ return self.rpcapi.host_maintenance_mode(context,
+ host_param=host_name, mode=mode, host=host_name)
+
+ def service_get_all(self, context, filters=None):
+ """Returns a list of services, optionally filtering the results.
+
+ If specified, 'filters' should be a dictionary containing services
+ attributes and matching values. Ie, to get a list of services for
+ the 'compute' topic, use filters={'topic': 'compute'}.
+ """
+ if filters is None:
+ filters = {}
+ services = self.db.service_get_all(context, False)
+ services = availability_zones.set_availability_zones(context,
+ services)
+ ret_services = []
+ for service in services:
+ for key, val in filters.iteritems():
+ if service[key] != val:
+ break
+ else:
+ # All filters matched.
+ ret_services.append(service)
+ return ret_services
+
+ def service_get_by_compute_host(self, context, host_name):
+ """Get service entry for the given compute hostname."""
+ return self.db.service_get_by_compute_host(context, host_name)
+
+ def instance_get_all_by_host(self, context, host_name):
+ """Return all instances on the given host."""
+ return self.db.instance_get_all_by_host(context, host_name)
class AggregateAPI(base.Base):
@@ -2115,18 +2293,15 @@ class AggregateAPI(base.Base):
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
- zones = [s.availability_zone for s in
- self.db.service_get_all_by_topic(context,
- FLAGS.compute_topic)]
- if availability_zone in zones:
- values = {"name": aggregate_name,
- "availability_zone": availability_zone}
- aggregate = self.db.aggregate_create(context, values)
- return dict(aggregate.iteritems())
- else:
- raise exception.InvalidAggregateAction(action='create_aggregate',
- aggregate_id="'N/A'",
- reason='invalid zone')
+
+ values = {"name": aggregate_name}
+ aggregate = self.db.aggregate_create(context, values,
+ metadata={'availability_zone': availability_zone})
+ aggregate = self._get_aggregate_info(context, aggregate)
+ # To maintain the same API result as before.
+ del aggregate['hosts']
+ del aggregate['metadata']
+ return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
@@ -2168,37 +2343,35 @@ class AggregateAPI(base.Base):
reason='not empty')
self.db.aggregate_delete(context, aggregate_id)
- def add_host_to_aggregate(self, context, aggregate_id, host):
+ def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
- if service.availability_zone != aggregate.availability_zone:
- raise exception.InvalidAggregateAction(
- action='add host',
- aggregate_id=aggregate_id,
- reason='availability zone mismatch')
- self.db.aggregate_host_add(context, aggregate_id, host)
+ self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
- def remove_host_from_aggregate(self, context, aggregate_id, host):
+ def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
- self.db.aggregate_host_delete(context, aggregate_id, host)
+ self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
- metadata = self.db.aggregate_metadata_get(context, aggregate.id)
- hosts = self.db.aggregate_host_get_all(context, aggregate.id)
+ metadata = self.db.aggregate_metadata_get(context, aggregate['id'])
+ hosts = self.db.aggregate_host_get_all(context, aggregate['id'])
result = dict(aggregate.iteritems())
+ # metadetails was not originally included here. We need to pull it
+ # back out to maintain API stability.
+ del result['metadetails']
result["metadata"] = metadata
result["hosts"] = hosts
return result
@@ -2304,7 +2477,7 @@ class SecurityGroupAPI(base.Base):
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
- self.sgh = importutils.import_object(FLAGS.security_group_handler)
+ self.sgh = importutils.import_object(CONF.security_group_handler)
def validate_property(self, value, property, allowed):
"""
@@ -2429,7 +2602,7 @@ class SecurityGroupAPI(base.Base):
return groups
def destroy(self, context, security_group):
- if self.db.security_group_in_use(context, security_group.id):
+ if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
@@ -2441,12 +2614,12 @@ class SecurityGroupAPI(base.Base):
LOG.exception(_("Failed to update usages deallocating "
"security group"))
- LOG.audit(_("Delete security group %s"), security_group.name,
+ LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
- self.db.security_group_destroy(context, security_group.id)
+ self.db.security_group_destroy(context, security_group['id'])
self.sgh.trigger_security_group_destroy_refresh(context,
- security_group.id)
+ security_group['id'])
# Commit the reservations
if reservations:
@@ -2472,7 +2645,7 @@ class SecurityGroupAPI(base.Base):
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
- """Add security group to the instance"""
+ """Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
@@ -2502,7 +2675,7 @@ class SecurityGroupAPI(base.Base):
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
- """Remove the security group associated with the instance"""
+ """Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
@@ -2682,7 +2855,7 @@ class SecurityGroupAPI(base.Base):
"""Indicates whether the specified rule values are already
defined in the given security group.
"""
- for rule in security_group.rules:
+ for rule in security_group['rules']:
is_duplicate = True
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
new file mode 100644
index 000000000..d1d9a11d2
--- /dev/null
+++ b/nova/compute/cells_api.py
@@ -0,0 +1,532 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Compute API that proxies via Cells Service."""
+
+from nova import block_device
+from nova.cells import rpcapi as cells_rpcapi
+from nova.compute import api as compute_api
+from nova.compute import instance_types
+from nova.compute import vm_states
+from nova import exception
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+check_instance_state = compute_api.check_instance_state
+wrap_check_policy = compute_api.wrap_check_policy
+check_policy = compute_api.check_policy
+check_instance_lock = compute_api.check_instance_lock
+
+
+def validate_cell(fn):
+ def _wrapped(self, context, instance, *args, **kwargs):
+ self._validate_cell(instance, fn.__name__)
+ return fn(self, context, instance, *args, **kwargs)
+ _wrapped.__name__ = fn.__name__
+ return _wrapped
+
+
+class ComputeRPCAPINoOp(object):
+ def __getattr__(self, key):
+ def _noop_rpc_wrapper(*args, **kwargs):
+ return None
+ return _noop_rpc_wrapper
+
+
+class SchedulerRPCAPIRedirect(object):
+ def __init__(self, cells_rpcapi_obj):
+ self.cells_rpcapi = cells_rpcapi_obj
+
+ def __getattr__(self, key):
+ def _noop_rpc_wrapper(*args, **kwargs):
+ return None
+ return _noop_rpc_wrapper
+
+ def run_instance(self, context, **kwargs):
+ self.cells_rpcapi.schedule_run_instance(context, **kwargs)
+
+
+class ComputeCellsAPI(compute_api.API):
+ def __init__(self, *args, **kwargs):
+ super(ComputeCellsAPI, self).__init__(*args, **kwargs)
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+ # Avoid casts/calls directly to compute
+ self.compute_rpcapi = ComputeRPCAPINoOp()
+ # Redirect scheduler run_instance to cells.
+ self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi)
+
+ def _cell_read_only(self, cell_name):
+ """Is the target cell in a read-only mode?"""
+ # FIXME(comstud): Add support for this.
+ return False
+
+ def _validate_cell(self, instance, method):
+ cell_name = instance['cell_name']
+ if not cell_name:
+ raise exception.InstanceUnknownCell(
+ instance_uuid=instance['uuid'])
+ if self._cell_read_only(cell_name):
+ raise exception.InstanceInvalidState(
+ attr="vm_state",
+ instance_uuid=instance['uuid'],
+ state="temporary_readonly",
+ method=method)
+
+ def _cast_to_cells(self, context, instance, method, *args, **kwargs):
+ instance_uuid = instance['uuid']
+ cell_name = instance['cell_name']
+ if not cell_name:
+ raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
+ self.cells_rpcapi.cast_compute_api_method(context, cell_name,
+ method, instance_uuid, *args, **kwargs)
+
+ def _call_to_cells(self, context, instance, method, *args, **kwargs):
+ instance_uuid = instance['uuid']
+ cell_name = instance['cell_name']
+ if not cell_name:
+ raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
+ return self.cells_rpcapi.call_compute_api_method(context, cell_name,
+ method, instance_uuid, *args, **kwargs)
+
+ def _check_requested_networks(self, context, requested_networks):
+ """Override compute API's checking of this. It'll happen in
+ child cell
+ """
+ return
+
+ def _validate_image_href(self, context, image_href):
+ """Override compute API's checking of this. It'll happen in
+ child cell
+ """
+ return
+
+ def backup(self, context, instance, name, backup_type, rotation,
+ extra_properties=None, image_id=None):
+ """Backup the given instance."""
+ image_meta = super(ComputeCellsAPI, self).backup(context,
+ instance, name, backup_type, rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'backup', name,
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ return image_meta
+
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
+ """Snapshot the given instance."""
+ image_meta = super(ComputeCellsAPI, self).snapshot(context,
+ instance, name, extra_properties=extra_properties,
+ image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'snapshot',
+ name, extra_properties=extra_properties, image_id=image_id)
+ return image_meta
+
+ def create(self, *args, **kwargs):
+ """We can use the base functionality, but I left this here just
+ for completeness.
+ """
+ return super(ComputeCellsAPI, self).create(*args, **kwargs)
+
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by admin_actions api
+
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state to change
+ the instance's state to,
+ eg. 'active'
+ """
+ self.update(context, instance,
+ pass_on_state_change=True,
+ vm_state=new_state,
+ task_state=None)
+
+ def update(self, context, instance, pass_on_state_change=False, **kwargs):
+ """
+ Update an instance.
+ :param pass_on_state_change: if true, the state change will be passed
+ on to child cells
+ """
+ cell_name = instance['cell_name']
+ if cell_name and self._cell_read_only(cell_name):
+ raise exception.InstanceInvalidState(
+ attr="vm_state",
+ instance_uuid=instance['uuid'],
+ state="temporary_readonly",
+ method='update')
+ rv = super(ComputeCellsAPI, self).update(context,
+ instance, **kwargs)
+ kwargs_copy = kwargs.copy()
+ if not pass_on_state_change:
+ # We need to skip vm_state/task_state updates... those will
+ # happen via a _cast_to_cells when running a different
+ # compute api method
+ kwargs_copy.pop('vm_state', None)
+ kwargs_copy.pop('task_state', None)
+ if kwargs_copy:
+ try:
+ self._cast_to_cells(context, instance, 'update',
+ **kwargs_copy)
+ except exception.InstanceUnknownCell:
+ pass
+ return rv
+
+ def _local_delete(self, context, instance, bdms):
+ # This will get called for every delete in the API cell
+ # because _delete() in compute/api.py will not find a
+ # service when checking if it's up.
+ # We need to only take action if there's no cell_name. Our
+ # overrides of delete() and soft_delete() will take care of
+ # the rest.
+ cell_name = instance['cell_name']
+ if not cell_name:
+ return super(ComputeCellsAPI, self)._local_delete(context,
+ instance, bdms)
+
+ def soft_delete(self, context, instance):
+ self._handle_cell_delete(context, instance,
+ super(ComputeCellsAPI, self).soft_delete, 'soft_delete')
+
+ def delete(self, context, instance):
+ self._handle_cell_delete(context, instance,
+ super(ComputeCellsAPI, self).delete, 'delete')
+
+ def _handle_cell_delete(self, context, instance, method, method_name):
+ """Terminate an instance."""
+ # We can't use the decorator because we have special logic in the
+ # case we don't know the cell_name...
+ cell_name = instance['cell_name']
+ if cell_name and self._cell_read_only(cell_name):
+ raise exception.InstanceInvalidState(
+ attr="vm_state",
+ instance_uuid=instance['uuid'],
+ state="temporary_readonly",
+ method=method_name)
+ method(context, instance)
+ try:
+ self._cast_to_cells(context, instance, method_name)
+ except exception.InstanceUnknownCell:
+ # If there's no cell, there's also no host... which means
+ # the instance was destroyed from the DB here. Let's just
+ # broadcast a message down to all cells and hope this ends
+ # up resolving itself... Worse case.. the instance will
+ # show back up again here.
+ delete_type = method == 'soft_delete' and 'soft' or 'hard'
+ self.cells_rpcapi.instance_delete_everywhere(context,
+ instance['uuid'], delete_type)
+
+ @validate_cell
+ def restore(self, context, instance):
+ """Restore a previously deleted (but not reclaimed) instance."""
+ super(ComputeCellsAPI, self).restore(context, instance)
+ self._cast_to_cells(context, instance, 'restore')
+
+ @validate_cell
+ def force_delete(self, context, instance):
+ """Force delete a previously deleted (but not reclaimed) instance."""
+ super(ComputeCellsAPI, self).force_delete(context, instance)
+ self._cast_to_cells(context, instance, 'force_delete')
+
+ @validate_cell
+ def stop(self, context, instance, do_cast=True):
+ """Stop an instance."""
+ super(ComputeCellsAPI, self).stop(context, instance)
+ if do_cast:
+ self._cast_to_cells(context, instance, 'stop', do_cast=True)
+ else:
+ return self._call_to_cells(context, instance, 'stop',
+ do_cast=False)
+
+ @validate_cell
+ def start(self, context, instance):
+ """Start an instance."""
+ super(ComputeCellsAPI, self).start(context, instance)
+ self._cast_to_cells(context, instance, 'start')
+
+ @validate_cell
+ def reboot(self, context, instance, *args, **kwargs):
+ """Reboot the given instance."""
+ super(ComputeCellsAPI, self).reboot(context, instance,
+ *args, **kwargs)
+ self._cast_to_cells(context, instance, 'reboot', *args,
+ **kwargs)
+
+ @validate_cell
+ def rebuild(self, context, instance, *args, **kwargs):
+ """Rebuild the given instance with the provided attributes."""
+ super(ComputeCellsAPI, self).rebuild(context, instance, *args,
+ **kwargs)
+ self._cast_to_cells(context, instance, 'rebuild', *args, **kwargs)
+
+ @check_instance_state(vm_state=[vm_states.RESIZED])
+ @validate_cell
+ def revert_resize(self, context, instance):
+ """Reverts a resize, deleting the 'new' instance in the process."""
+ super(ComputeCellsAPI, self).revert_resize(context, instance)
+ self._cast_to_cells(context, instance, 'revert_resize')
+
+ @check_instance_state(vm_state=[vm_states.RESIZED])
+ @validate_cell
+ def confirm_resize(self, context, instance):
+ """Confirms a migration/resize and deletes the 'old' instance."""
+ super(ComputeCellsAPI, self).confirm_resize(context, instance)
+ self._cast_to_cells(context, instance, 'confirm_resize')
+
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
+ task_state=[None])
+ @validate_cell
+ def resize(self, context, instance, *args, **kwargs):
+ """Resize (ie, migrate) a running instance.
+
+ If flavor_id is None, the process is considered a migration, keeping
+ the original flavor_id. If flavor_id is not None, the instance should
+ be migrated to a new host and resized to the new flavor_id.
+ """
+ super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)
+
+ # NOTE(johannes): If we get to this point, then we know the
+ # specified flavor_id is valid and exists. We'll need to load
+ # it again, but that should be safe.
+
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ flavor_id = kwargs.get('flavor_id')
+
+ if not flavor_id:
+ new_instance_type = old_instance_type
+ else:
+ new_instance_type = instance_types.get_instance_type_by_flavor_id(
+ flavor_id)
+
+ # NOTE(johannes): Later, when the resize is confirmed or reverted,
+ # the superclass implementations of those methods will need access
+ # to a local migration record for quota reasons. We don't need
+ # source and/or destination information, just the old and new
+ # instance_types. Status is set to 'finished' since nothing else
+ # will update the status along the way.
+ self.db.migration_create(context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': new_instance_type['id'],
+ 'status': 'finished'})
+
+ # FIXME(comstud): pass new instance_type object down to a method
+ # that'll unfold it
+ self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
+
+ @validate_cell
+ def add_fixed_ip(self, context, instance, *args, **kwargs):
+ """Add fixed_ip from specified network to given instance."""
+ super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
+ *args, **kwargs)
+ self._cast_to_cells(context, instance, 'add_fixed_ip',
+ *args, **kwargs)
+
+ @validate_cell
+ def remove_fixed_ip(self, context, instance, *args, **kwargs):
+ """Remove fixed_ip from specified network to given instance."""
+ super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
+ *args, **kwargs)
+ self._cast_to_cells(context, instance, 'remove_fixed_ip',
+ *args, **kwargs)
+
+ @validate_cell
+ def pause(self, context, instance):
+ """Pause the given instance."""
+ super(ComputeCellsAPI, self).pause(context, instance)
+ self._cast_to_cells(context, instance, 'pause')
+
+ @validate_cell
+ def unpause(self, context, instance):
+ """Unpause the given instance."""
+ super(ComputeCellsAPI, self).unpause(context, instance)
+ self._cast_to_cells(context, instance, 'unpause')
+
+ def set_host_enabled(self, context, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ # FIXME(comstud): Since there's no instance here, we have no
+ # idea which cell should be the target.
+ pass
+
+ def host_power_action(self, context, host, action):
+ """Reboots, shuts down or powers up the host."""
+ # FIXME(comstud): Since there's no instance here, we have no
+ # idea which cell should be the target.
+ pass
+
+ def get_diagnostics(self, context, instance):
+ """Retrieve diagnostics for the given instance."""
+ # FIXME(comstud): Cache this?
+ # Also: only calling super() to get state/policy checking
+ super(ComputeCellsAPI, self).get_diagnostics(context, instance)
+ return self._call_to_cells(context, instance, 'get_diagnostics')
+
+ @validate_cell
+ def suspend(self, context, instance):
+ """Suspend the given instance."""
+ super(ComputeCellsAPI, self).suspend(context, instance)
+ self._cast_to_cells(context, instance, 'suspend')
+
+ @validate_cell
+ def resume(self, context, instance):
+ """Resume the given instance."""
+ super(ComputeCellsAPI, self).resume(context, instance)
+ self._cast_to_cells(context, instance, 'resume')
+
+ @validate_cell
+ def rescue(self, context, instance, rescue_password=None):
+ """Rescue the given instance."""
+ super(ComputeCellsAPI, self).rescue(context, instance,
+ rescue_password=rescue_password)
+ self._cast_to_cells(context, instance, 'rescue',
+ rescue_password=rescue_password)
+
+ @validate_cell
+ def unrescue(self, context, instance):
+ """Unrescue the given instance."""
+ super(ComputeCellsAPI, self).unrescue(context, instance)
+ self._cast_to_cells(context, instance, 'unrescue')
+
+ @validate_cell
+ def set_admin_password(self, context, instance, password=None):
+ """Set the root/admin password for the given instance."""
+ super(ComputeCellsAPI, self).set_admin_password(context, instance,
+ password=password)
+ self._cast_to_cells(context, instance, 'set_admin_password',
+ password=password)
+
+ @validate_cell
+ def inject_file(self, context, instance, *args, **kwargs):
+ """Write a file to the given instance."""
+ super(ComputeCellsAPI, self).inject_file(context, instance, *args,
+ **kwargs)
+ self._cast_to_cells(context, instance, 'inject_file', *args, **kwargs)
+
+ @wrap_check_policy
+ @validate_cell
+ def get_vnc_console(self, context, instance, console_type):
+ """Get a url to a VNC Console."""
+ if not instance['host']:
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
+
+ connect_info = self._call_to_cells(context, instance,
+ 'get_vnc_connect_info', console_type)
+
+ self.consoleauth_rpcapi.authorize_console(context,
+ connect_info['token'], console_type, connect_info['host'],
+ connect_info['port'], connect_info['internal_access_path'])
+ return {'url': connect_info['access_url']}
+
+ @validate_cell
+ def get_console_output(self, context, instance, *args, **kwargs):
+ """Get console output for an an instance."""
+ # NOTE(comstud): Calling super() just to get policy check
+ super(ComputeCellsAPI, self).get_console_output(context, instance,
+ *args, **kwargs)
+ return self._call_to_cells(context, instance, 'get_console_output',
+ *args, **kwargs)
+
+ def lock(self, context, instance):
+ """Lock the given instance."""
+ super(ComputeCellsAPI, self).lock(context, instance)
+ self._cast_to_cells(context, instance, 'lock')
+
+ def unlock(self, context, instance):
+ """Unlock the given instance."""
+ super(ComputeCellsAPI, self).lock(context, instance)
+ self._cast_to_cells(context, instance, 'unlock')
+
+ @validate_cell
+ def reset_network(self, context, instance):
+ """Reset networking on the instance."""
+ super(ComputeCellsAPI, self).reset_network(context, instance)
+ self._cast_to_cells(context, instance, 'reset_network')
+
+ @validate_cell
+ def inject_network_info(self, context, instance):
+ """Inject network info for the instance."""
+ super(ComputeCellsAPI, self).inject_network_info(context, instance)
+ self._cast_to_cells(context, instance, 'inject_network_info')
+
+ @wrap_check_policy
+ @validate_cell
+ def attach_volume(self, context, instance, volume_id, device=None):
+ """Attach an existing volume to an existing instance."""
+ if device and not block_device.match_device(device):
+ raise exception.InvalidDevicePath(path=device)
+ device = self.compute_rpcapi.reserve_block_device_name(
+ context, device=device, instance=instance, volume_id=volume_id)
+ try:
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_attach(context, volume)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.db.block_device_mapping_destroy_by_instance_and_device(
+ context, instance['uuid'], device)
+ self._cast_to_cells(context, instance, 'attach_volume',
+ volume_id, device)
+
+ @check_instance_lock
+ @validate_cell
+ def _detach_volume(self, context, instance, volume_id):
+ """Detach a volume from an instance."""
+ check_policy(context, 'detach_volume', instance)
+
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_detach(context, volume)
+ self._cast_to_cells(context, instance, 'detach_volume',
+ volume_id)
+
+ @wrap_check_policy
+ @validate_cell
+ def associate_floating_ip(self, context, instance, address):
+ """Makes calls to network_api to associate_floating_ip.
+
+ :param address: is a string floating ip address
+ """
+ self._cast_to_cells(context, instance, 'associate_floating_ip',
+ address)
+
+ @validate_cell
+ def delete_instance_metadata(self, context, instance, key):
+ """Delete the given metadata item from an instance."""
+ super(ComputeCellsAPI, self).delete_instance_metadata(context,
+ instance, key)
+ self._cast_to_cells(context, instance, 'delete_instance_metadata',
+ key)
+
+ @wrap_check_policy
+ @validate_cell
+ def update_instance_metadata(self, context, instance,
+ metadata, delete=False):
+ rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
+ instance, metadata, delete=delete)
+ try:
+ self._cast_to_cells(context, instance,
+ 'update_instance_metadata',
+ metadata, delete=delete)
+ except exception.InstanceUnknownCell:
+ pass
+ return rv
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 6415ae187..fc534fd23 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -17,7 +17,6 @@
Claim objects for use with resource tracking.
"""
-from nova import context
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
@@ -27,7 +26,10 @@ COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
class NopClaim(object):
- """For use with compute drivers that do not support resource tracking"""
+ """For use with compute drivers that do not support resource tracking."""
+
+ def __init__(self, migration=None):
+ self.migration = migration
@property
def disk_gb(self):
@@ -184,3 +186,35 @@ class Claim(NopClaim):
LOG.info(msg, instance=self.instance)
return can_claim
+
+
+class ResizeClaim(Claim):
+ """Claim used for holding resources for an incoming resize/migration
+ operation.
+ """
+ def __init__(self, instance, instance_type, tracker):
+ super(ResizeClaim, self).__init__(instance, tracker)
+ self.instance_type = instance_type
+ self.migration = None
+
+ @property
+ def disk_gb(self):
+ return (self.instance_type['root_gb'] +
+ self.instance_type['ephemeral_gb'])
+
+ @property
+ def memory_mb(self):
+ return self.instance_type['memory_mb']
+
+ @property
+ def vcpus(self):
+ return self.instance_type['vcpus']
+
+ @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
+ def abort(self):
+ """Compute operation requiring claimed resources has failed or
+ been aborted.
+ """
+ LOG.debug(_("Aborting claim: %s") % self, instance=self.instance)
+ self.tracker.abort_resize_claim(self.instance['uuid'],
+ self.instance_type)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index fd796bd91..78129ee6b 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,15 +21,24 @@
"""Built-in instance properties."""
import re
+import uuid
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-FLAGS = flags.FLAGS
+instance_type_opts = [
+ cfg.StrOpt('default_instance_type',
+ default='m1.small',
+ help='default instance type to use, testing only'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(instance_type_opts)
+
LOG = logging.getLogger(__name__)
INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
@@ -40,11 +49,11 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
"""Creates instance types."""
if flavorid is None:
- flavorid = utils.gen_uuid()
+ flavorid = uuid.uuid4()
if swap is None:
swap = 0
if rxtx_factor is None:
- rxtx_factor = 1
+ rxtx_factor = 1.0
if ephemeral_gb is None:
ephemeral_gb = 0
@@ -64,20 +73,28 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
raise exception.InvalidInput(reason=msg)
# ensure some attributes are integers and greater than or equal to 0
- for option in kwargs:
+ for option in ['memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb', 'swap']:
try:
kwargs[option] = int(kwargs[option])
assert kwargs[option] >= 0
except (ValueError, AssertionError):
- msg = _("create arguments must be positive integers")
+ msg = _("'%s' argument must be a positive integer") % option
raise exception.InvalidInput(reason=msg)
+ # rxtx_factor should be a positive float
+ try:
+ kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
+ assert kwargs['rxtx_factor'] > 0
+ except (ValueError, AssertionError):
+ msg = _("'rxtx_factor' argument must be a positive float")
+ raise exception.InvalidInput(reason=msg)
+
# some value are required to be nonzero, not just positive
for option in ['memory_mb', 'vcpus']:
try:
assert kwargs[option] > 0
except AssertionError:
- msg = _("create arguments must be positive integers")
+ msg = _("'%s' argument must be greater than 0") % option
raise exception.InvalidInput(reason=msg)
kwargs['name'] = name
@@ -86,6 +103,9 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
kwargs['flavorid'] = unicode(flavorid)
# ensure is_public attribute is boolean
+ if not utils.is_valid_boolstr(is_public):
+ msg = _("is_public must be a boolean")
+ raise exception.InvalidInput(reason=msg)
kwargs['is_public'] = utils.bool_from_str(is_public)
try:
@@ -126,7 +146,7 @@ get_all_flavors = get_all_types
def get_default_instance_type():
"""Get the default instance type."""
- name = FLAGS.default_instance_type
+ name = CONF.default_instance_type
return get_instance_type_by_name(name)
@@ -169,7 +189,7 @@ def get_instance_type_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
- """Retrieve instance type access list by flavor id"""
+ """Retrieve instance type access list by flavor id."""
if ctxt is None:
ctxt = context.get_admin_context()
@@ -177,7 +197,7 @@ def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
def add_instance_type_access(flavorid, projectid, ctxt=None):
- """Add instance type access for project"""
+ """Add instance type access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
@@ -185,7 +205,7 @@ def add_instance_type_access(flavorid, projectid, ctxt=None):
def remove_instance_type_access(flavorid, projectid, ctxt=None):
- """Remove instance type access for project"""
+ """Remove instance type access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6d69fbac4..3bf8e61ef 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -25,13 +25,6 @@ building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
-**Related Flags**
-
-:instances_path: Where instances are kept on disk
-:base_dir_name: Where cached images are stored under instances_path
-:compute_driver: Name of class that is used to handle virtualization, loaded
- by :func:`nova.openstack.common.importutils.import_object`
-
"""
import contextlib
@@ -40,10 +33,12 @@ import socket
import sys
import time
import traceback
+import uuid
from eventlet import greenthread
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import instance_types
from nova.compute import power_state
@@ -52,14 +47,14 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import conductor
import nova.context
from nova import exception
-from nova import flags
+from nova import hooks
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
-from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
@@ -68,31 +63,70 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova import paths
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
+from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
compute_opts = [
- cfg.StrOpt('instances_path',
- default='$state_path/instances',
- help='where instances are stored on disk'),
- cfg.StrOpt('base_dir_name',
- default='_base',
- help="Where cached images are stored under $instances_path."
- "This is NOT the full path - just a folder name."
- "For per-compute-host cached images, set to _base_$my_ip"),
cfg.StrOpt('console_host',
default=socket.getfqdn(),
help='Console proxy host to use to connect '
'to instances on this host.'),
+ cfg.StrOpt('default_access_ip_network_name',
+ default=None,
+ help='Name of network to use to set access ips for instances'),
+ cfg.BoolOpt('defer_iptables_apply',
+ default=False,
+ help='Whether to batch up the application of IPTables rules'
+ ' during a host restart and apply all at the end of the'
+ ' init phase'),
+ cfg.StrOpt('instances_path',
+ default=paths.state_path_def('instances'),
+ help='where instances are stored on disk'),
+ cfg.BoolOpt('instance_usage_audit',
+ default=False,
+ help="Generate periodic compute.instance.exists notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
+ cfg.BoolOpt('resume_guests_state_on_host_boot',
+ default=False,
+ help='Whether to start guests that were running before the '
+ 'host rebooted'),
+ ]
+
+interval_opts = [
+ cfg.IntOpt('bandwidth_poll_interval',
+ default=600,
+ help='interval to pull bandwidth usage info'),
+ cfg.IntOpt("heal_instance_info_cache_interval",
+ default=60,
+ help="Number of seconds between instance info_cache self "
+ "healing updates"),
+ cfg.IntOpt('host_state_interval',
+ default=120,
+ help='Interval in seconds for querying the host status'),
+ cfg.IntOpt("image_cache_manager_interval",
+ default=2400,
+ help='Number of seconds to wait between runs of the image '
+ 'cache manager'),
+ cfg.IntOpt('reclaim_instance_interval',
+ default=0,
+ help='Interval in seconds for reclaiming deleted instances'),
+ cfg.IntOpt('volume_usage_poll_interval',
+ default=0,
+ help='Interval in seconds for gathering volume usages'),
+]
+
+timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
@@ -111,37 +145,34 @@ compute_opts = [
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
- cfg.IntOpt('host_state_interval',
- default=120,
- help='Interval in seconds for querying the host status'),
- cfg.IntOpt("running_deleted_instance_timeout",
- default=0,
- help="Number of seconds after being deleted when a running "
- "instance should be considered eligible for cleanup."),
- cfg.IntOpt("running_deleted_instance_poll_interval",
- default=30,
- help="Number of periodic scheduler ticks to wait between "
- "runs of the cleanup task."),
+]
+
+running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="log",
+                help="Action to take if a running deleted instance is detected. "
"Valid options are 'noop', 'log' and 'reap'. "
"Set to 'noop' to disable."),
- cfg.IntOpt("image_cache_manager_interval",
- default=40,
- help="Number of periodic scheduler ticks to wait between "
- "runs of the image cache manager."),
- cfg.IntOpt("heal_instance_info_cache_interval",
- default=60,
- help="Number of seconds between instance info_cache self "
- "healing updates"),
- cfg.BoolOpt('instance_usage_audit',
- default=False,
- help="Generate periodic compute.instance.exists notifications"),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(compute_opts)
+ cfg.IntOpt("running_deleted_instance_poll_interval",
+ default=1800,
+ help="Number of seconds to wait between runs of the cleanup "
+ "task."),
+ cfg.IntOpt("running_deleted_instance_timeout",
+ default=0,
+ help="Number of seconds after being deleted when a running "
+ "instance should be considered eligible for cleanup."),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(compute_opts)
+CONF.register_opts(interval_opts)
+CONF.register_opts(timeout_opts)
+CONF.register_opts(running_deleted_opts)
+CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
+CONF.import_opt('console_topic', 'nova.console.rpcapi')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('network_manager', 'nova.service')
QUOTAS = quota.QUOTAS
@@ -153,7 +184,7 @@ def publisher_id(host=None):
def reverts_task_state(function):
- """Decorator to revert task_state on failure"""
+ """Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
@@ -216,69 +247,101 @@ class ComputeVirtAPI(virtapi.VirtAPI):
self._compute = compute
def instance_update(self, context, instance_uuid, updates):
- return self._compute.db.instance_update_and_get_original(context,
- instance_uuid,
- updates)
+ return self._compute._instance_update(context,
+ instance_uuid,
+ **updates)
def instance_get_by_uuid(self, context, instance_uuid):
- return self._compute.db.instance_get_by_uuid(context, instance_uuid)
+ return self._compute.conductor_api.instance_get_by_uuid(
+ context, instance_uuid)
def instance_get_all_by_host(self, context, host):
- return self._compute.db.instance_get_all_by_host(context, host)
+ return self._compute.conductor_api.instance_get_all_by_host(
+ context, host)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return self._compute.conductor_api.aggregate_get_by_host(context,
+ host, key=key)
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ return self._compute.conductor_api.aggregate_metadata_add(
+ context, aggregate, metadata, set_delete=set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ return self._compute.conductor_api.aggregate_metadata_delete(
+ context, aggregate, key)
+
+ def security_group_get_by_instance(self, context, instance):
+ return self._compute.conductor_api.security_group_get_by_instance(
+ context, instance)
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group):
+ return (self._compute.conductor_api.
+ security_group_rule_get_by_security_group(context,
+ security_group))
+
+ def provider_fw_rule_get_all(self, context):
+ return self._compute.conductor_api.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self._compute.conductor_api.agent_build_get_by_triple(
+ context, hypervisor, os, architecture)
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.16'
+ RPC_API_VERSION = '2.23'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
- # TODO(vish): sync driver creation logic with the rest of the system
- # and re-document the module docstring
- if not compute_driver:
- compute_driver = FLAGS.compute_driver
-
- if not compute_driver:
- LOG.error(_("Compute driver option required, but not specified"))
- sys.exit(1)
-
self.virtapi = ComputeVirtAPI(self)
-
- LOG.info(_("Loading compute driver '%s'") % compute_driver)
- try:
- self.driver = utils.check_isinstance(
- importutils.import_object_ns('nova.virt', compute_driver,
- self.virtapi),
- driver.ComputeDriver)
- except ImportError as e:
- LOG.error(_("Unable to load the virtualization driver: %s") % (e))
- sys.exit(1)
-
+ self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.network_api = network.API()
self.volume_api = volume.API()
self.network_manager = importutils.import_object(
- FLAGS.network_manager, host=kwargs.get('host', None))
+ CONF.network_manager, host=kwargs.get('host', None))
self._last_host_check = 0
self._last_bw_usage_poll = 0
+ self._last_vol_usage_poll = 0
self._last_info_cache_heal = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+ self.conductor_api = conductor.API()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
- self.resource_tracker = resource_tracker.ResourceTracker(self.host,
- self.driver)
+ self._resource_tracker_dict = {}
+
+ def _get_resource_tracker(self, nodename):
+ rt = self._resource_tracker_dict.get(nodename)
+ if not rt:
+ if nodename not in self.driver.get_available_nodes():
+ msg = _("%(nodename)s is not a valid node managed by this "
+ "compute host.") % locals()
+ raise exception.NovaException(msg)
+
+ rt = resource_tracker.ResourceTracker(self.host,
+ self.driver,
+ nodename)
+ self._resource_tracker_dict[nodename] = rt
+ return rt
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
- (old_ref, instance_ref) = self.db.instance_update_and_get_original(
- context, instance_uuid, kwargs)
- self.resource_tracker.update_usage(context, instance_ref)
- notifications.send_update(context, old_ref, instance_ref)
+ instance_ref = self.conductor_api.instance_update(context,
+ instance_uuid,
+ **kwargs)
+ if (instance_ref['host'] == self.host and
+ instance_ref['node'] in self.driver.get_available_nodes()):
+
+ rt = self._get_resource_tracker(instance_ref.get('node'))
+ rt.update_usage(context, instance_ref)
return instance_ref
@@ -291,79 +354,149 @@ class ComputeManager(manager.SchedulerDependentManager):
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
- def init_host(self):
- """Initialization for a standalone compute service."""
- self.driver.init_host(host=self.host)
- context = nova.context.get_admin_context()
- instances = self.db.instance_get_all_by_host(context, self.host)
+ def _get_instances_on_driver(self, context):
+ """Return a list of instance records that match the instances found
+ on the hypervisor.
+ """
+ local_instances = []
+ try:
+ # Try to find all local instances by uuid.
+ # FIXME(comstud): Would be nice to consolidate this into
+ # a single query to nova-conductor.
+ for uuid in self.driver.list_instance_uuids():
+ try:
+ instance = self.conductor_api.instance_get_by_uuid(
+ context, uuid)
+ local_instances.append(instance)
+ except exception.InstanceNotFound as e:
+ LOG.error(_('Instance %(uuid)s found in the '
+ 'hypervisor, but not in the database'),
+ locals())
+ continue
+ return local_instances
+ except NotImplementedError:
+ pass
- if FLAGS.defer_iptables_apply:
- self.driver.filter_defer_apply_on()
+ # The driver doesn't support uuids listing, so we'll have
+ # to brute force.
+ driver_instances = self.driver.list_instances()
+ all_instances = self.conductor_api.instance_get_all(context)
+ name_map = dict([(instance['name'], instance)
+ for instance in all_instances])
+ local_instances = []
+ for driver_instance in driver_instances:
+ instance = name_map.get(driver_instance)
+ if not instance:
+ LOG.error(_('Instance %(driver_instance)s found in the '
+ 'hypervisor, but not in the database'),
+ locals())
+ continue
+ local_instances.append(instance)
+ return local_instances
- try:
- for count, instance in enumerate(instances):
- db_state = instance['power_state']
- drv_state = self._get_power_state(context, instance)
- closing_vm_states = (vm_states.DELETED,
- vm_states.SOFT_DELETED)
-
- # instance was supposed to shut down - don't attempt
- # recovery in any case
- if instance['vm_state'] in closing_vm_states:
- continue
+ def _destroy_evacuated_instances(self, context):
+ """Destroys evacuated instances.
- expect_running = (db_state == power_state.RUNNING and
- drv_state != db_state)
+ While nova-compute was down, the instances running on it could be
+ evacuated to another host. Check that the instances reported
+ by the driver are still associated with this host. If they are
+ not, destroy them.
+ """
+ our_host = self.host
+ local_instances = self._get_instances_on_driver(context)
+ for instance in local_instances:
+ instance_host = instance['host']
+ instance_name = instance['name']
+ if instance['host'] != our_host:
+ LOG.info(_('Deleting instance as its host ('
+ '%(instance_host)s) is not equal to our '
+ 'host (%(our_host)s).'),
+ locals(), instance=instance)
+ network_info = self._get_instance_nw_info(context, instance)
+ bdi = self._get_instance_volume_block_device_info(context,
+ instance)
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info),
+ bdi,
+ False)
+
+ def _init_instance(self, context, instance):
+ '''Initialize this instance during service init.'''
+ db_state = instance['power_state']
+ drv_state = self._get_power_state(context, instance)
+ closing_vm_states = (vm_states.DELETED,
+ vm_states.SOFT_DELETED)
+
+ # instance was supposed to shut down - don't attempt
+ # recovery in any case
+ if instance['vm_state'] in closing_vm_states:
+ return
- LOG.debug(_('Current state is %(drv_state)s, state in DB is '
- '%(db_state)s.'), locals(), instance=instance)
+ expect_running = (db_state == power_state.RUNNING and
+ drv_state != db_state)
- net_info = compute_utils.get_nw_info_for_instance(instance)
+ LOG.debug(_('Current state is %(drv_state)s, state in DB is '
+ '%(db_state)s.'), locals(), instance=instance)
- # We're calling plug_vifs to ensure bridge and iptables
- # filters are present, calling it once is enough.
- if count == 0:
- legacy_net_info = self._legacy_nw_info(net_info)
- self.driver.plug_vifs(instance, legacy_net_info)
+ net_info = compute_utils.get_nw_info_for_instance(instance)
- if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
- or FLAGS.start_guests_on_host_boot):
- LOG.info(
- _('Rebooting instance after nova-compute restart.'),
- locals(), instance=instance)
+ # We're calling plug_vifs to ensure bridge and iptables
+ # rules exist. This needs to be called for each instance.
+ legacy_net_info = self._legacy_nw_info(net_info)
+ self.driver.plug_vifs(instance, legacy_net_info)
- block_device_info = \
- self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ if expect_running and CONF.resume_guests_state_on_host_boot:
+ LOG.info(
+ _('Rebooting instance after nova-compute restart.'),
+ locals(), instance=instance)
- try:
- self.driver.resume_state_on_host_boot(
- context,
- instance,
- self._legacy_nw_info(net_info),
- block_device_info)
- except NotImplementedError:
- LOG.warning(_('Hypervisor driver does not support '
- 'resume guests'), instance=instance)
+ block_device_info = \
+ self._get_instance_volume_block_device_info(
+ context, instance['uuid'])
- elif drv_state == power_state.RUNNING:
- # VMWareAPI drivers will raise an exception
- try:
- self.driver.ensure_filtering_rules_for_instance(
- instance,
- self._legacy_nw_info(net_info))
- except NotImplementedError:
- LOG.warning(_('Hypervisor driver does not support '
- 'firewall rules'), instance=instance)
+ try:
+ self.driver.resume_state_on_host_boot(
+ context,
+ instance,
+ self._legacy_nw_info(net_info),
+ block_device_info)
+ except NotImplementedError:
+ LOG.warning(_('Hypervisor driver does not support '
+ 'resume guests'), instance=instance)
+
+ elif drv_state == power_state.RUNNING:
+ # VMwareAPI drivers will raise an exception
+ try:
+ self.driver.ensure_filtering_rules_for_instance(
+ instance,
+ self._legacy_nw_info(net_info))
+ except NotImplementedError:
+ LOG.warning(_('Hypervisor driver does not support '
+ 'firewall rules'), instance=instance)
+
+ def init_host(self):
+ """Initialization for a standalone compute service."""
+ self.driver.init_host(host=self.host)
+ context = nova.context.get_admin_context()
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
+ if CONF.defer_iptables_apply:
+ self.driver.filter_defer_apply_on()
+
+ try:
+ # checking that instance was not already evacuated to other host
+ self._destroy_evacuated_instances(context)
+ for instance in instances:
+ self._init_instance(context, instance)
finally:
- if FLAGS.defer_iptables_apply:
+ if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._report_driver_status(context)
self.publish_service_capabilities(context)
- def pre_start_hook(self):
+ def pre_start_hook(self, **kwargs):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources.
@@ -378,6 +511,10 @@ class ComputeManager(manager.SchedulerDependentManager):
except exception.NotFound:
return power_state.NOSTATE
+ def get_backdoor_port(self, context):
+ """Return backdoor port for eventlet_backdoor."""
+ return self.backdoor_port
+
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
@@ -386,8 +523,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
#TODO(mdragon): perhaps make this variable by console_type?
return rpc.queue_get_for(context,
- FLAGS.console_topic,
- FLAGS.console_host)
+ CONF.console_topic,
+ CONF.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@@ -427,24 +564,29 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
- # get the network info from network
+ # Get the network info from network API, but don't let it
+ # update the cache, as that will hit the DB. We'll update
+ # the cache ourselves via the conductor.
network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ instance, update_cache=False)
+ cache = {'network_info': network_info.json()}
+ self.conductor_api.instance_info_cache_update(context,
+ instance,
+ cache)
return network_info
def _legacy_nw_info(self, network_info):
- """Converts the model nw_info object to legacy style"""
+ """Converts the model nw_info object to legacy style."""
if self.driver.legacy_nwinfo():
network_info = network_info.legacy()
return network_info
- def _setup_block_device_mapping(self, context, instance):
- """setup volumes for block device mapping"""
+ def _setup_block_device_mapping(self, context, instance, bdms):
+ """setup volumes for block device mapping."""
block_device_mapping = []
swap = None
ephemerals = []
- for bdm in self.db.block_device_mapping_get_all_by_instance(
- context, instance['uuid']):
+ for bdm in bdms:
LOG.debug(_('Setting up bdm %s'), bdm, instance=instance)
if bdm['no_device']:
@@ -479,7 +621,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if volume['status'] != 'creating':
break
greenthread.sleep(1)
- self.db.block_device_mapping_update(
+ self.conductor_api.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
@@ -490,7 +632,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance,
volume,
bdm['device_name'])
- self.db.block_device_mapping_update(
+ self.conductor_api.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
bdmap = {'connection_info': cinfo,
@@ -507,28 +649,40 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
- admin_password, is_first_time, instance):
+ admin_password, is_first_time, node, instance):
"""Launch a new instance with specified options."""
context = context.elevated()
try:
self._check_instance_not_already_created(context, instance)
image_meta = self._check_image_size(context, instance)
- extra_usage_info = {"image_name": image_meta['name']}
+
+ if node is None:
+ node = self.driver.get_available_nodes()[0]
+ LOG.debug(_("No node specified, defaulting to %(node)s") %
+ locals())
+
+ if image_meta:
+ extra_usage_info = {"image_name": image_meta['name']}
+ else:
+ extra_usage_info = {}
+
self._start_building(context, instance)
self._notify_about_instance_usage(
context, instance, "create.start",
extra_usage_info=extra_usage_info)
network_info = None
+ bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
+ context, instance)
+ rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
- with self.resource_tracker.instance_claim(context, instance,
- limits):
-
+ with rt.instance_claim(context, instance, limits):
+ macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
- requested_networks)
+ requested_networks, macs)
block_device_info = self._prep_block_device(context,
- instance)
+ instance, bdms)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password)
@@ -542,15 +696,17 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(msg, instance=instance)
raise
except Exception:
+ exc_info = sys.exc_info()
# try to re-schedule instance:
- self._reschedule_or_reraise(context, instance,
+ self._reschedule_or_reraise(context, instance, exc_info,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
else:
# Spawn success:
if (is_first_time and not instance['access_ip_v4']
and not instance['access_ip_v6']):
- self._update_access_ip(context, instance, network_info)
+ instance = self._update_access_ip(context, instance,
+ network_info)
self._notify_about_instance_usage(context, instance,
"create.end", network_info=network_info,
@@ -565,16 +721,18 @@ class ComputeManager(manager.SchedulerDependentManager):
traceback.format_exception(type_, value, tb),
instance_uuid=instance_uuid)
- def _reschedule_or_reraise(self, context, instance, requested_networks,
- admin_password, injected_files, is_first_time,
- request_spec, filter_properties):
+ def _reschedule_or_reraise(self, context, instance, exc_info,
+ requested_networks, admin_password, injected_files, is_first_time,
+ request_spec, filter_properties):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
- exc_info = sys.exc_info()
instance_uuid = instance['uuid']
rescheduled = False
+ compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ exc_info[0], exc_info=exc_info)
+
try:
self._deallocate_network(context, instance)
except Exception:
@@ -588,9 +746,9 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(context, request_spec,
- instance['uuid'], filter_properties,
+ filter_properties, instance['uuid'],
self.scheduler_rpcapi.run_instance, method_args,
- task_state)
+ task_state, exc_info)
except Exception:
rescheduled = False
@@ -605,7 +763,8 @@ class ComputeManager(manager.SchedulerDependentManager):
raise exc_info[0], exc_info[1], exc_info[2]
def _reschedule(self, context, request_spec, filter_properties,
- instance_uuid, scheduler_method, method_args, task_state):
+ instance_uuid, scheduler_method, method_args, task_state,
+ exc_info=None):
"""Attempt to re-schedule a compute operation."""
retry = filter_properties.get('retry', None)
@@ -629,18 +788,23 @@ class ComputeManager(manager.SchedulerDependentManager):
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
+ if exc_info:
+ # stringify to avoid circular ref problem in json serialization:
+ retry['exc'] = traceback.format_exception(*exc_info)
+
scheduler_method(context, *method_args)
return True
@manager.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
- timeout = FLAGS.instance_build_timeout
+ timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING}
- building_insts = self.db.instance_get_all_by_filters(context, filters)
+ building_insts = self.conductor_api.instance_get_all_by_filters(
+ context, filters)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
@@ -651,15 +815,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def _update_access_ip(self, context, instance, nw_info):
"""Update the access ip values for a given instance.
- If FLAGS.default_access_ip_network_name is set, this method will
+ If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose from,
an arbitrary one will be chosen.
"""
- network_name = FLAGS.default_access_ip_network_name
+ network_name = CONF.default_access_ip_network_name
if not network_name:
- return
+ return instance
update_info = {}
for vif in nw_info:
@@ -670,8 +834,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if ip['version'] == 6:
update_info['access_ip_v6'] = ip['address']
if update_info:
- self.db.instance_update(context, instance.uuid, update_info)
- notifications.send_update(context, instance, instance)
+ instance = self._instance_update(context, instance['uuid'],
+ **update_info)
+ return instance
def _check_instance_not_already_created(self, context, instance):
"""Ensure an instance with the same name is not already present."""
@@ -694,7 +859,10 @@ class ComputeManager(manager.SchedulerDependentManager):
image, but is accurate because it reflects the image's
actual size.
"""
- image_meta = _get_image_meta(context, instance['image_ref'])
+ if instance['image_ref']:
+ image_meta = _get_image_meta(context, instance['image_ref'])
+ else: # Instance was started from volume - so no image ref
+ return {}
try:
size_bytes = image_meta['size']
@@ -743,18 +911,19 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=(task_states.SCHEDULING,
None))
- def _allocate_network(self, context, instance, requested_networks):
- """Allocate networks for an instance and return the network info"""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.NETWORKING,
- expected_task_state=None)
- is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
+ def _allocate_network(self, context, instance, requested_networks, macs):
+ """Allocate networks for an instance and return the network info."""
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.NETWORKING,
+ expected_task_state=None)
+ is_vpn = pipelib.is_vpn_image(instance['image_ref'])
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
- requested_networks=requested_networks)
+ requested_networks=requested_networks,
+ macs=macs)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -765,13 +934,13 @@ class ComputeManager(manager.SchedulerDependentManager):
return network_info
- def _prep_block_device(self, context, instance):
- """Set up the block device for an instance with error logging"""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
+ def _prep_block_device(self, context, instance, bdms):
+ """Set up the block device for an instance with error logging."""
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
- return self._setup_block_device_mapping(context, instance)
+ return self._setup_block_device_mapping(context, instance, bdms)
except Exception:
LOG.exception(_('Instance failed block device setup'),
instance=instance)
@@ -779,12 +948,11 @@ class ComputeManager(manager.SchedulerDependentManager):
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
- """Spawn an instance with error logging and update its power state"""
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.SPAWNING,
- expected_task_state=task_states.
- BLOCK_DEVICE_MAPPING)
+ """Spawn an instance with error logging and update its power state."""
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING,
+ expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
@@ -807,7 +975,7 @@ class ComputeManager(manager.SchedulerDependentManager):
extra_usage_info=None):
# NOTE(sirp): The only thing this wrapper function does extra is handle
# the passing in of `self.host`. Ordinarily this will just be
- # `FLAGS.host`, but `Manager`'s gets a chance to override this in its
+ # CONF.host`, but `Manager`'s gets a chance to override this in its
# `__init__`.
compute_utils.notify_about_instance_usage(
context, instance, event_suffix, network_info=network_info,
@@ -819,17 +987,17 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.deallocate_for_instance(context, instance)
def _get_volume_bdms(self, bdms):
- """Return only bdms that have a volume_id"""
+ """Return only bdms that have a volume_id."""
return [bdm for bdm in bdms if bdm['volume_id']]
# NOTE(danms): Legacy interface for digging up volumes in the database
- def _get_instance_volume_bdms(self, context, instance_uuid):
+ def _get_instance_volume_bdms(self, context, instance):
return self._get_volume_bdms(
- self.db.block_device_mapping_get_all_by_instance(context,
- instance_uuid))
+ self.conductor_api.block_device_mapping_get_all_by_instance(
+ context, instance))
- def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
- bdms = self._get_instance_volume_bdms(context, instance_uuid)
+ def _get_instance_volume_bdm(self, context, instance, volume_id):
+ bdms = self._get_instance_volume_bdms(context, instance)
for bdm in bdms:
# NOTE(vish): Comparing as strings because the os_api doesn't
# convert to integer and we may wish to support uuids
@@ -839,10 +1007,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(danms): This is a transitional interface until all the callers
# can provide their own bdms
- def _get_instance_volume_block_device_info(self, context, instance_uuid,
+ def _get_instance_volume_block_device_info(self, context, instance,
bdms=None):
if bdms is None:
- bdms = self._get_instance_volume_bdms(context, instance_uuid)
+ bdms = self._get_instance_volume_bdms(context, instance)
return self._get_volume_block_device_info(bdms)
def _get_volume_block_device_info(self, bdms):
@@ -870,7 +1038,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def run_instance(self, context, instance, request_spec=None,
filter_properties=None, requested_networks=None,
injected_files=None, admin_password=None,
- is_first_time=False):
+ is_first_time=False, node=None):
if filter_properties is None:
filter_properties = {}
@@ -881,7 +1049,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
- admin_password, is_first_time, instance)
+ admin_password, is_first_time, node, instance)
do_run_instance()
def _shutdown_instance(self, context, instance, bdms):
@@ -904,7 +1072,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(vish) get bdms before destroying the instance
vol_bdms = self._get_volume_bdms(bdms)
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'], bdms=bdms)
+ context, instance, bdms=bdms)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
for bdm in vol_bdms:
@@ -935,10 +1103,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.volume_api.delete(context, volume)
# NOTE(vish): bdms will be deleted on instance destroy
+ @hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
- self.db.instance_info_cache_delete(context, instance_uuid)
+ self.conductor_api.instance_info_cache_delete(context, instance)
self._notify_about_instance_usage(context, instance, "delete.start")
self._shutdown_instance(context, instance, bdms)
# NOTE(vish): We have already deleted the instance, so we have
@@ -961,13 +1130,12 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
- self.db.instance_destroy(context, instance_uuid)
- system_meta = self.db.instance_system_metadata_get(context,
- instance_uuid)
+ system_meta = compute_utils.metadata_to_dict(
+ instance['system_metadata'])
+ self.conductor_api.instance_destroy(context, instance)
# ensure block device mappings are not leaked
- for bdm in bdms:
- self.db.block_device_mapping_destroy(context, bdm['id'])
+ self.conductor_api.block_device_mapping_destroy(context, bdms)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
@@ -975,7 +1143,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms=None):
- """Terminate an instance on this host. """
+ """Terminate an instance on this host."""
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
# DELETING, as a signal to the API layer that a subsequent deletion
@@ -985,7 +1153,7 @@ class ComputeManager(manager.SchedulerDependentManager):
elevated = context.elevated()
# NOTE(danms): remove this compatibility in the future
if not bdms:
- bdms = self._get_instance_volume_bdms(context, instance["uuid"])
+ bdms = self._get_instance_volume_bdms(context, instance)
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_terminate_instance(instance, bdms):
@@ -1011,13 +1179,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.STOPPED,
- expected_task_state=(task_states.POWERING_OFF,
- task_states.STOPPING),
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.STOPPED,
+ expected_task_state=(task_states.POWERING_OFF,
+ task_states.STOPPING),
+ task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
# NOTE(johannes): This is probably better named power_on_instance
@@ -1031,13 +1198,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=(task_states.POWERING_ON,
- task_states.STARTING))
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=(task_states.POWERING_ON,
+ task_states.STARTING))
self._notify_about_instance_usage(context, instance, "power_on.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1054,12 +1220,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the soft_delete method
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SOFT_DELETED,
- expected_task_state=task_states.SOFT_DELETING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1075,12 +1240,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# doesn't implement the restore method
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- expected_task_state=task_states.RESTORING,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ expected_task_state=task_states.RESTORING,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "restore.end")
# NOTE(johannes): In the folsom release, power_off_instance was poorly
@@ -1109,7 +1273,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@reverts_task_state
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
- injected_files, new_pass, orig_sys_metadata=None):
+ injected_files, new_pass, orig_sys_metadata=None,
+ bdms=None, recreate=False, on_shared_storage=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -1122,57 +1287,112 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
+ :param recreate: True if instance should be recreated with same disk
+ :param on_shared_storage: True if instance files on shared storage
"""
context = context.elevated()
+
+ orig_vm_state = instance['vm_state']
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
- image_meta = _get_image_meta(context, image_ref)
+ if recreate:
+
+ if not self.driver.capabilities["supports_recreate"]:
+ # if driver doesn't support recreate return with failure
+ _msg = _('instance recreate is not implemented '
+ 'by this driver.')
+
+ LOG.warn(_msg, instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ REBUILDING)
+ raise exception.Invalid(_msg)
+
+ self._check_instance_not_already_created(context, instance)
+
+ # to cover case when admin expects that instance files are on
+ # shared storage, but not accessible and vice versa
+ if on_shared_storage != self.driver.instance_on_disk(instance):
+ _msg = _("Invalid state of instance files on "
+ "shared storage")
+ raise exception.Invalid(_msg)
+
+ if on_shared_storage:
+ LOG.info(_('disk on shared storage,'
+ 'recreating using existing disk'))
+ else:
+ image_ref = orig_image_ref = instance['image_ref']
+ LOG.info(_("disk not on shared storage"
+ "rebuilding from: '%s'") % str(image_ref))
+
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host)
+
+ if image_ref:
+ image_meta = _get_image_meta(context, image_ref)
+ else:
+ image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
- orig_image_ref_url = utils.generate_image_url(orig_image_ref)
+ orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
- extra_usage_info = {'image_name': image_meta['name']}
+ extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- task_state=task_states.REBUILDING,
- expected_task_state=task_states.REBUILDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ task_state=task_states.REBUILDING,
+ expected_task_state=task_states.REBUILDING)
+
+ if recreate:
+ # Detaching volumes.
+ for bdm in self._get_instance_volume_bdms(context, instance):
+ volume = self.volume_api.get(context, bdm['volume_id'])
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
+ # We can't run volume disconnect on source because
+ # the host is down. Just marking volume as detached
+ # in db, anyway the zombie instance going to be deleted
+ # from source during init_host when host comes back
+ self.volume_api.detach(context.elevated(), volume)
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING,
- expected_task_state=task_states.REBUILDING)
+ self.network_api.setup_networks_on_host(context,
+ instance, self.host)
+ else:
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info))
- instance.injected_files = injected_files
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
- device_info = self._setup_block_device_mapping(context, instance)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ expected_task_state=task_states.REBUILDING)
+
+ instance['injected_files'] = injected_files
+ network_info = self._get_instance_nw_info(context, instance)
+ if bdms is None:
+ capi = self.conductor_api
+ bdms = capi.block_device_mapping_get_all_by_instance(
+ context, instance)
+ device_info = self._setup_block_device_mapping(context, instance,
+ bdms)
+
+ expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.REBUILD_SPAWNING,
+ expected_task_state=expected_task_state)
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.
- REBUILD_SPAWNING,
- expected_task_state=task_states.
- REBUILD_BLOCK_DEVICE_MAPPING)
- # pull in new password here since the original password isn't in
- # the db
admin_password = new_pass
self.driver.spawn(context, instance, image_meta,
@@ -1190,6 +1410,15 @@ class ComputeManager(manager.SchedulerDependentManager):
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
+ LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
+ if orig_vm_state == vm_states.STOPPED:
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
+ terminated_at=timeutils.utcnow(),
+ progress=0)
+ self.stop_instance(context, instance['uuid'])
+
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
@@ -1210,20 +1439,15 @@ class ComputeManager(manager.SchedulerDependentManager):
# is no longer needed
if block_device_info is None:
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
- # NOTE(danms): remove this when RPC API < 2.5 compatibility
- # is no longer needed
- if network_info is None:
- network_info = self._get_instance_nw_info(context, instance)
- else:
- network_info = network_model.NetworkInfo.hydrate(network_info)
+ context, instance)
+ network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
@@ -1244,10 +1468,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
@@ -1270,9 +1494,8 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
@@ -1288,21 +1511,29 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "snapshot.start")
- self.driver.snapshot(context, instance, image_id)
-
if image_type == 'snapshot':
expected_task_state = task_states.IMAGE_SNAPSHOT
elif image_type == 'backup':
expected_task_state = task_states.IMAGE_BACKUP
- self._instance_update(context, instance['uuid'], task_state=None,
- expected_task_state=expected_task_state)
+ def update_task_state(task_state, expected_state=expected_task_state):
+ return self._instance_update(context, instance['uuid'],
+ task_state=task_state,
+ expected_task_state=expected_state)
+
+ self.driver.snapshot(context, instance, image_id, update_task_state)
+ # The instance could have changed from the driver. But since
+ # we're doing a fresh update here, we'll grab the changes.
+
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.IMAGE_UPLOADING)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
- elif image_type == 'backup' and rotation:
+ elif image_type == 'backup' and rotation >= 0:
self._rotate_backups(context, instance, backup_type, rotation)
elif image_type == 'backup':
@@ -1363,7 +1594,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if new_pass is None:
# Generate a random password
- new_pass = utils.generate_password(FLAGS.password_length)
+ new_pass = utils.generate_password()
max_tries = 10
@@ -1440,6 +1671,27 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)
self.driver.inject_file(instance, path, file_contents)
+ def _get_rescue_image_ref(self, context, instance):
+ """Determine what image should be used to boot the rescue VM."""
+ system_meta = compute_utils.metadata_to_dict(
+ instance['system_metadata'])
+
+ rescue_image_ref = system_meta.get('image_base_image_ref')
+
+ # 1. First try to use base image associated with instance's current
+ # image.
+ #
+ # The idea here is to provide the customer with a rescue environment
+ # which they are familiar with. So, if they built their instance off of
+ # a Debian image, their rescue VM wil also be Debian.
+ if rescue_image_ref:
+ return rescue_image_ref
+
+ # 2. As a last resort, use instance's current image
+ LOG.warn(_('Unable to find a different image to use for rescue VM,'
+ ' using instance\'s current image'))
+ return instance['image_ref']
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
@@ -1452,15 +1704,21 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
- utils.generate_password(FLAGS.password_length))
+ utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
- image_meta = _get_image_meta(context, instance['image_ref'])
+
+ rescue_image_ref = self._get_rescue_image_ref(context, instance)
+
+ if rescue_image_ref:
+ rescue_image_meta = _get_image_meta(context, rescue_image_ref)
+ else:
+ rescue_image_meta = {}
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.rescue(context, instance,
- self._legacy_nw_info(network_info), image_meta,
- admin_password)
+ self._legacy_nw_info(network_info),
+ rescue_image_meta, admin_password)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
@@ -1468,6 +1726,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.RESCUED,
task_state=None,
power_state=current_power_state,
+ launched_at=timeutils.utcnow(),
expected_task_state=task_states.RESCUING)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1507,8 +1766,7 @@ class ComputeManager(manager.SchedulerDependentManager):
migration=None, migration_id=None):
"""Destroys the source instance."""
if not migration:
- migration = self.db.migration_get(context.elevated(),
- migration_id)
+ migration = self.conductor_api.migration_get(context, migration_id)
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
@@ -1523,6 +1781,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.confirm_migration(migration, instance,
self._legacy_nw_info(network_info))
+ rt = self._get_resource_tracker(migration['source_node'])
+ rt.confirm_resize(context, migration)
+
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
@@ -1541,8 +1802,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
if not migration:
- migration = self.db.migration_get(context.elevated(),
- migration_id)
+ migration = self.conductor_api.migration_get(context, migration_id)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
@@ -1560,13 +1820,16 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
self._terminate_volume_connections(context, instance)
+ rt = self._get_resource_tracker(instance.get('node'))
+ rt.revert_resize(context, migration, status='reverted_dest')
+
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration['source_compute'],
reservations)
@@ -1582,10 +1845,8 @@ class ComputeManager(manager.SchedulerDependentManager):
in the database.
"""
- elevated = context.elevated()
-
if not migration:
- migration = self.db.migration_get(elevated, migration_id)
+ migration = self.conductor_api.migration_get(context, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
@@ -1594,18 +1855,24 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
+ old_instance_type = migration['old_instance_type_id']
+ instance_type = instance_types.get_instance_type(old_instance_type)
+
instance = self._instance_update(context,
- instance['uuid'],
- host=migration['source_compute'])
+ instance['uuid'],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ root_gb=instance_type['root_gb'],
+ ephemeral_gb=instance_type['ephemeral_gb'],
+ instance_type_id=instance_type['id'],
+ host=migration['source_compute'],
+ node=migration['source_node'])
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
- old_instance_type = migration['old_instance_type_id']
- instance_type = instance_types.get_instance_type(old_instance_type)
-
- bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+ bdms = self._get_instance_volume_bdms(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
@@ -1619,26 +1886,18 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self._instance_update(context,
- instance['uuid'],
- memory_mb=instance_type['memory_mb'],
- vcpus=instance_type['vcpus'],
- root_gb=instance_type['root_gb'],
- ephemeral_gb=instance_type['ephemeral_gb'],
- instance_type_id=instance_type['id'],
- launched_at=timeutils.utcnow(),
- expected_task_state=task_states.
- RESIZE_REVERTING)
+ instance = self._instance_update(context,
+ instance['uuid'], launched_at=timeutils.utcnow(),
+ expected_task_state=task_states.RESIZE_REVERTING)
self.network_api.migrate_instance_finish(context, instance,
migration)
- self._instance_update(context, instance['uuid'],
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE, task_state=None)
- self.db.migration_update(elevated, migration['id'],
- {'status': 'reverted'})
+ rt = self._get_resource_tracker(instance.get('node'))
+ rt.revert_resize(context, migration)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
@@ -1655,51 +1914,64 @@ class ComputeManager(manager.SchedulerDependentManager):
if reservations:
QUOTAS.rollback(context, reservations)
+ def _prep_resize(self, context, image, instance, instance_type,
+ reservations, request_spec, filter_properties, node):
+
+ if not filter_properties:
+ filter_properties = {}
+
+ if not instance['host']:
+ self._set_instance_error_state(context, instance['uuid'])
+ msg = _('Instance has no source host')
+ raise exception.MigrationError(msg)
+
+ same_host = instance['host'] == self.host
+ if same_host and not CONF.allow_resize_to_same_host:
+ self._set_instance_error_state(context, instance['uuid'])
+ msg = _('destination same as source!')
+ raise exception.MigrationError(msg)
+
+ limits = filter_properties.get('limits', {})
+ rt = self._get_resource_tracker(node)
+ with rt.resize_claim(context, instance, instance_type, limits=limits) \
+ as claim:
+ migration_ref = claim.migration
+
+ LOG.audit(_('Migrating'), context=context,
+ instance=instance)
+ self.compute_rpcapi.resize_instance(context, instance,
+ migration_ref, image, instance_type, reservations)
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations=None, request_spec=None,
- filter_properties=None):
+ filter_properties=None, node=None):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
+ if node is None:
+ node = self.driver.get_available_nodes()[0]
+ LOG.debug(_("No node specified, defaulting to %(node)s") %
+ locals())
+
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
compute_utils.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
-
try:
- same_host = instance['host'] == self.host
- if same_host and not FLAGS.allow_resize_to_same_host:
- self._set_instance_error_state(context, instance['uuid'])
- msg = _('destination same as source!')
- raise exception.MigrationError(msg)
-
- old_instance_type = instance['instance_type']
-
- migration_ref = self.db.migration_create(context.elevated(),
- {'instance_uuid': instance['uuid'],
- 'source_compute': instance['host'],
- 'dest_compute': self.host,
- 'dest_host': self.driver.get_host_ip_addr(),
- 'old_instance_type_id': old_instance_type['id'],
- 'new_instance_type_id': instance_type['id'],
- 'status': 'pre-migrating'})
-
- LOG.audit(_('Migrating'), context=context,
- instance=instance)
- self.compute_rpcapi.resize_instance(context, instance,
- migration_ref, image, instance_type, reservations)
-
+ self._prep_resize(context, image, instance, instance_type,
+ reservations, request_spec, filter_properties, node)
except Exception:
# try to re-schedule the resize elsewhere:
+ exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
- instance_type, reservations, request_spec,
+ exc_info, instance_type, reservations, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
@@ -1710,7 +1982,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
- def _reschedule_resize_or_reraise(self, context, image, instance,
+ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, reservations, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
@@ -1720,10 +1992,12 @@ class ComputeManager(manager.SchedulerDependentManager):
if not filter_properties:
filter_properties = {}
- exc_info = sys.exc_info()
rescheduled = False
instance_uuid = instance['uuid']
+ compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ exc_info[0], exc_info=exc_info)
+
try:
scheduler_method = self.scheduler_rpcapi.prep_resize
method_args = (instance, instance_type, image, request_spec,
@@ -1732,7 +2006,7 @@ class ComputeManager(manager.SchedulerDependentManager):
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, task_state)
+ method_args, task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
@@ -1752,30 +2026,28 @@ class ComputeManager(manager.SchedulerDependentManager):
reservations=None, migration=None, migration_id=None,
instance_type=None):
"""Starts the migration of a running instance to another host."""
- elevated = context.elevated()
if not migration:
- migration = self.db.migration_get(elevated, migration_id)
+ migration = self.conductor_api.migration_get(context, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
if not instance_type:
- instance_type = self.db.instance_type_get(context,
+ instance_type = self.conductor_api.instance_type_get(context,
migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
- self.db.migration_update(elevated,
- migration['id'],
- {'status': 'migrating'})
+ migration = self.conductor_api.migration_update(context,
+ migration, 'migrating')
- self._instance_update(context, instance['uuid'],
- task_state=task_states.RESIZE_MIGRATING,
- expected_task_state=task_states.RESIZE_PREP)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.RESIZE_MIGRATING,
+ expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration['dest_host'],
@@ -1787,15 +2059,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.migrate_instance_start(context, instance,
migration)
- migration = self.db.migration_update(elevated,
- migration['id'],
- {'status': 'post-migrating'})
+ migration = self.conductor_api.migration_update(context,
+ migration, 'post-migrating')
- self._instance_update(context, instance['uuid'],
- host=migration['dest_compute'],
- task_state=task_states.RESIZE_MIGRATED,
- expected_task_state=task_states.
- RESIZE_MIGRATING)
+ instance = self._instance_update(context, instance['uuid'],
+ host=migration['dest_compute'],
+ node=migration['dest_node'],
+ task_state=task_states.RESIZE_MIGRATED,
+ expected_task_state=task_states.
+ RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
@@ -1805,7 +2077,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info=network_info)
def _terminate_volume_connections(self, context, instance):
- bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+ bdms = self._get_instance_volume_bdms(context, instance)
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
@@ -1840,7 +2112,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance)
- self._instance_update(context, instance['uuid'],
+ instance = self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_FINISH,
expected_task_state=task_states.RESIZE_MIGRATED)
@@ -1848,9 +2120,9 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "finish_resize.start",
network_info=network_info)
- bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+ bdms = self._get_instance_volume_bdms(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'], bdms=bdms)
+ context, instance, bdms=bdms)
if bdms:
connector = self.driver.get_volume_connector(instance)
@@ -1865,6 +2137,9 @@ class ComputeManager(manager.SchedulerDependentManager):
image, resize_instance,
block_device_info)
+ migration = self.conductor_api.migration_update(context,
+ migration, 'finished')
+
instance = self._instance_update(context,
instance['uuid'],
vm_state=vm_states.RESIZED,
@@ -1873,9 +2148,6 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=task_states.
RESIZE_FINISH)
- self.db.migration_update(context.elevated(), migration['id'],
- {'status': 'finished'})
-
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@@ -1892,8 +2164,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
if not migration:
- migration = self.db.migration_get(context.elevated(),
- migration_id)
+ migration = self.conductor_api.migration_get(context, migration_id)
try:
self._finish_resize(context, instance, migration,
disk_info, image)
@@ -2029,12 +2300,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SUSPENDED,
- task_state=None,
- expected_task_state=task_states.SUSPENDING)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None,
+ expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@@ -2045,14 +2315,18 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.audit(_('Resuming'), context=context, instance=instance)
- self.driver.resume(instance)
+
+ network_info = self._get_instance_nw_info(context, instance)
+ block_device_info = self._get_instance_volume_block_device_info(
+ context, instance)
+
+ self.driver.resume(instance, self._legacy_nw_info(network_info),
+ block_device_info)
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context,
+ instance['uuid'], power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None)
self._notify_about_instance_usage(context, instance, 'resume')
@@ -2111,14 +2385,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("Getting vnc console"), instance=instance)
- token = str(utils.gen_uuid())
+ token = str(uuid.uuid4())
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
- access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
+ access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
- access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
+ access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
@@ -2162,7 +2436,7 @@ class ComputeManager(manager.SchedulerDependentManager):
values = {'instance_uuid': instance['uuid'],
'volume_id': volume_id or 'reserved',
'device_name': result}
- self.db.block_device_mapping_create(context, values)
+ self.conductor_api.block_device_mapping_create(context, values)
return result
return do_reserve()
@@ -2176,8 +2450,9 @@ class ComputeManager(manager.SchedulerDependentManager):
mountpoint, instance)
except Exception:
with excutils.save_and_reraise_exception():
- self.db.block_device_mapping_destroy_by_instance_and_device(
- context, instance.get('uuid'), mountpoint)
+ capi = self.conductor_api
+ capi.block_device_mapping_destroy_by_instance_and_device(
+ context, instance, mountpoint)
def _attach_volume(self, context, volume_id, mountpoint, instance):
volume = self.volume_api.get(context, volume_id)
@@ -2202,7 +2477,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
self.driver.attach_volume(connection_info,
- instance['name'],
+ instance,
mountpoint)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
@@ -2228,7 +2503,8 @@ class ComputeManager(manager.SchedulerDependentManager):
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
- self.db.block_device_mapping_update_or_create(context, values)
+ self.conductor_api.block_device_mapping_update_or_create(context,
+ values)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
@@ -2248,7 +2524,7 @@ class ComputeManager(manager.SchedulerDependentManager):
connection_info['serial'] = volume_id
try:
self.driver.detach_volume(connection_info,
- instance['name'],
+ instance,
mp)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
@@ -2263,26 +2539,43 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
- bdm = self._get_instance_volume_bdm(context, instance['uuid'],
- volume_id)
+ bdm = self._get_instance_volume_bdm(context, instance, volume_id)
+ if CONF.volume_usage_poll_interval > 0:
+ vol_stats = []
+ mp = bdm['device_name']
+ # Handle bootable volumes which will not contain /dev/
+ if '/dev/' in mp:
+ mp = mp[5:]
+ try:
+ vol_stats = self.driver.block_stats(instance['name'], mp)
+ except NotImplementedError:
+ pass
+
+ if vol_stats:
+ LOG.debug(_("Updating volume usage cache with totals"))
+ rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+ self.conductor_api.vol_usage_update(context, volume_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance,
+ update_totals=True)
+
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume, connector)
self.volume_api.detach(context.elevated(), volume)
- self.db.block_device_mapping_destroy_by_instance_and_volume(
- context, instance['uuid'], volume_id)
+ self.conductor_api.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance, volume_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_volume_connection(self, context, volume_id, instance):
- """Remove a volume connection using the volume api"""
+ """Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
- bdm = self._get_instance_volume_bdm(context,
- instance['uuid'],
- volume_id)
+ bdm = self._get_instance_volume_bdm(context, instance, volume_id)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
@@ -2291,10 +2584,10 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def _get_compute_info(self, context, host):
- compute_node_ref = self.db.service_get_all_compute_by_host(context,
- host)
+ compute_node_ref = self.conductor_api.service_get_by_compute_host(
+ context, host)
try:
- return compute_node_ref[0]['compute_node'][0]
+ return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %(host)s not found") % locals())
@@ -2316,18 +2609,21 @@ class ComputeManager(manager.SchedulerDependentManager):
and None otherwise.
"""
src_compute_info = self._get_compute_info(ctxt, instance['host'])
- dst_compute_info = self._get_compute_info(ctxt, FLAGS.host)
+ dst_compute_info = self._get_compute_info(ctxt, CONF.host)
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
+ migrate_data = {}
try:
- self.compute_rpcapi.check_can_live_migrate_source(ctxt,
- instance, dest_check_data)
+ migrate_data = self.compute_rpcapi.\
+ check_can_live_migrate_source(ctxt, instance,
+ dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if dest_check_data and 'migrate_data' in dest_check_data:
- return dest_check_data['migrate_data']
+ migrate_data.update(dest_check_data['migrate_data'])
+ return migrate_data
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
@@ -2339,29 +2635,39 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
+
+ Returns a dict values required for live migration without shared
+ storage.
"""
- self.driver.check_can_live_migrate_source(ctxt, instance,
- dest_check_data)
+ is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
+ instance,
+ None)
+ dest_check_data['is_volume_backed'] = is_volume_backed
+ return self.driver.check_can_live_migrate_source(ctxt, instance,
+ dest_check_data)
def pre_live_migration(self, context, instance,
- block_migration=False, disk=None):
+ block_migration=False, disk=None,
+ migrate_data=None):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
+        :param migrate_data: if not None, it is a dict which holds data
+ required for live migration without shared storage.
"""
# If any volume is mounted, prepare here.
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
if not block_device_info['block_device_mapping']:
LOG.info(_('Instance has no volume.'), instance=instance)
# assign the volume to host system
# needed by the lefthand volume driver and maybe others
connector = self.driver.get_volume_connector(instance)
- for bdm in self._get_instance_volume_bdms(context, instance['uuid']):
+ for bdm in self._get_instance_volume_bdms(context, instance):
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume, connector)
@@ -2375,7 +2681,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.pre_live_migration(context, instance,
block_device_info,
- self._legacy_nw_info(network_info))
+ self._legacy_nw_info(network_info),
+ migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
@@ -2412,14 +2719,14 @@ class ComputeManager(manager.SchedulerDependentManager):
disk = None
self.compute_rpcapi.pre_live_migration(context, instance,
- block_migration, disk, dest)
+ block_migration, disk, dest, migrate_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Pre live migration failed at %(dest)s'),
locals(), instance=instance)
self._rollback_live_migration(context, instance, dest,
- block_migration)
+ block_migration, migrate_data)
# Executing live migration
# live_migration might raises exceptions, but
@@ -2430,7 +2737,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_migration, migrate_data)
def _post_live_migration(self, ctxt, instance_ref,
- dest, block_migration=False):
+ dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
This method is called from live_migration
@@ -2440,6 +2747,8 @@ class ComputeManager(manager.SchedulerDependentManager):
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest: destination host
:param block_migration: if true, prepare for block migration
+ :param migrate_data: if not None, it is a dict which has data
+ required for live migration without shared storage
"""
LOG.info(_('_post_live_migration() is started..'),
@@ -2447,7 +2756,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Detaching volumes.
connector = self.driver.get_volume_connector(instance_ref)
- for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['uuid']):
+ for bdm in self._get_instance_volume_bdms(ctxt, instance_ref):
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
@@ -2465,29 +2774,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.unfilter_instance(instance_ref,
self._legacy_nw_info(network_info))
- # Database updating.
- # NOTE(jkoelker) This needs to be converted to network api calls
- # if nova wants to support floating_ips in
- # quantum/melange
- try:
- # Not return if floating_ip is not found, otherwise,
- # instance never be accessible..
- floating_ip = self.db.instance_get_floating_address(ctxt,
- instance_ref['id'])
- if not floating_ip:
- LOG.info(_('No floating_ip found'), instance=instance_ref)
- else:
- floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
- floating_ip)
- self.db.floating_ip_update(ctxt,
- floating_ip_ref['address'],
- {'host': dest})
- except exception.NotFound:
- LOG.info(_('No floating_ip found.'), instance=instance_ref)
- except Exception, e:
- LOG.error(_('Live migration: Unexpected error: cannot inherit '
- 'floating ip.\n%(e)s'), locals(),
- instance=instance_ref)
+ migration = {'source_compute': self.host,
+ 'dest_compute': dest, }
+ self.network_api.migrate_instance_start(ctxt, instance_ref, migration)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
@@ -2496,7 +2785,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
- if block_migration:
+ # must be deleted for preparing next live migration w/o shared storage
+ is_shared_storage = True
+ if migrate_data:
+ is_shared_storage = migrate_data.get('is_shared_storage', True)
+ if block_migration or not is_shared_storage:
self.driver.destroy(instance_ref,
self._legacy_nw_info(network_info))
else:
@@ -2536,6 +2829,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
+ migration = {'source_compute': instance['host'],
+ 'dest_compute': self.host, }
+ self.network_api.migrate_instance_finish(context, instance, migration)
network_info = self._get_instance_nw_info(context, instance)
self.driver.post_live_migration_at_destination(context, instance,
@@ -2543,57 +2839,58 @@ class ComputeManager(manager.SchedulerDependentManager):
block_migration)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- host=self.host,
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host, power_state=current_power_state,
+ vm_state=vm_states.ACTIVE, task_state=None,
+ expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
- def _rollback_live_migration(self, context, instance_ref,
- dest, block_migration):
+ def _rollback_live_migration(self, context, instance,
+ dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
- :param instance_ref: nova.db.sqlalchemy.models.Instance
+ :param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
+ :param migrate_data:
+            if not None, contains implementation-specific data.
"""
- host = instance_ref['host']
- self._instance_update(context,
- instance_ref['uuid'],
- host=host,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.MIGRATING)
+ host = instance['host']
+ instance = self._instance_update(context, instance['uuid'],
+ host=host, vm_state=vm_states.ACTIVE,
+ task_state=None, expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
- self.network_api.setup_networks_on_host(context, instance_ref,
- self.host)
+ self.network_api.setup_networks_on_host(context, instance, self.host)
- for bdm in self._get_instance_volume_bdms(context,
- instance_ref['uuid']):
+ for bdm in self._get_instance_volume_bdms(context, instance):
volume_id = bdm['volume_id']
- volume = self.volume_api.get(context, volume_id)
- self.compute_rpcapi.remove_volume_connection(context, instance_ref,
- volume['id'], dest)
+ self.compute_rpcapi.remove_volume_connection(context, instance,
+ volume_id, dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
# any empty images has to be deleted.
- if block_migration:
+ # Also Volume backed live migration w/o shared storage needs to delete
+ # newly created instance-xxx dir on the destination as a part of its
+ # rollback process
+ is_volume_backed = False
+ is_shared_storage = True
+ if migrate_data:
+ is_volume_backed = migrate_data.get('is_volume_backed', False)
+ is_shared_storage = migrate_data.get('is_shared_storage', True)
+ if block_migration or (is_volume_backed and not is_shared_storage):
self.compute_rpcapi.rollback_live_migration_at_destination(context,
- instance_ref, dest)
+ instance, dest)
def rollback_live_migration_at_destination(self, context, instance):
- """ Cleaning up image directory that is created pre_live_migration.
+ """Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: an Instance dict sent over rpc
@@ -2607,7 +2904,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
@@ -2623,7 +2920,7 @@ class ComputeManager(manager.SchedulerDependentManager):
If anything errors, we don't care. It's possible the instance
has been deleted, etc.
"""
- heal_interval = FLAGS.heal_instance_info_cache_interval
+ heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
@@ -2637,14 +2934,14 @@ class ComputeManager(manager.SchedulerDependentManager):
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
- instance = self.db.instance_get_by_uuid(context,
+ instance = self.conductor_api.instance_get_by_uuid(context,
instance_uuids.pop(0))
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
- db_instances = self.db.instance_get_all_by_host(
+ db_instances = self.conductor_api.instance_get_all_by_host(
context, self.host)
if not db_instances:
# None.. just return.
@@ -2657,7 +2954,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
- self.network_api.get_instance_nw_info(context, instance)
+ self._get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
@@ -2666,37 +2963,54 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_rebooting_instances(self, context):
- if FLAGS.reboot_timeout > 0:
- instances = self.db.instance_get_all_hung_in_rebooting(
- context, FLAGS.reboot_timeout)
- self.driver.poll_rebooting_instances(FLAGS.reboot_timeout,
+ if CONF.reboot_timeout > 0:
+ instances = self.conductor_api.instance_get_all_hung_in_rebooting(
+ context, CONF.reboot_timeout)
+ self.driver.poll_rebooting_instances(CONF.reboot_timeout,
instances)
@manager.periodic_task
def _poll_rescued_instances(self, context):
- if FLAGS.rescue_timeout > 0:
- self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+ if CONF.rescue_timeout > 0:
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
+
+ rescued_instances = []
+ for instance in instances:
+ if instance['vm_state'] == vm_states.RESCUED:
+ rescued_instances.append(instance)
+
+ to_unrescue = []
+ for instance in rescued_instances:
+ if timeutils.is_older_than(instance['launched_at'],
+ CONF.rescue_timeout):
+ to_unrescue.append(instance)
+
+ for instance in to_unrescue:
+ self.compute_api.unrescue(context, instance)
@manager.periodic_task
def _poll_unconfirmed_resizes(self, context):
- if FLAGS.resize_confirm_window > 0:
- migrations = self.db.migration_get_unconfirmed_by_dest_compute(
- context, FLAGS.resize_confirm_window, self.host)
+ if CONF.resize_confirm_window > 0:
+ capi = self.conductor_api
+ migrations = capi.migration_get_unconfirmed_by_dest_compute(
+ context, CONF.resize_confirm_window, self.host)
migrations_info = dict(migration_count=len(migrations),
- confirm_window=FLAGS.resize_confirm_window)
+ confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
- def _set_migration_to_error(migration_id, reason, **kwargs):
+ def _set_migration_to_error(migration, reason, **kwargs):
+ migration_id = migration['id']
msg = _("Setting migration %(migration_id)s to error: "
"%(reason)s") % locals()
LOG.warn(msg, **kwargs)
- self.db.migration_update(context, migration_id,
- {'status': 'error'})
+ self.conductor_api.migration_update(context, migration,
+ 'error')
for migration in migrations:
migration_id = migration['id']
@@ -2705,15 +3019,15 @@ class ComputeManager(manager.SchedulerDependentManager):
"%(migration_id)s for instance %(instance_uuid)s"),
locals())
try:
- instance = self.db.instance_get_by_uuid(context,
- instance_uuid)
+ instance = self.conductor_api.instance_get_by_uuid(
+ context, instance_uuid)
except exception.InstanceNotFound:
reason = _("Instance %(instance_uuid)s not found")
- _set_migration_to_error(migration_id, reason % locals())
+ _set_migration_to_error(migration, reason % locals())
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
- _set_migration_to_error(migration_id, reason % locals(),
+ _set_migration_to_error(migration, reason % locals(),
instance=instance)
continue
vm_state = instance['vm_state']
@@ -2721,7 +3035,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_state != vm_states.RESIZED or task_state is not None:
reason = _("In states %(vm_state)s/%(task_state)s, not"
"RESIZED/None")
- _set_migration_to_error(migration_id, reason % locals(),
+ _set_migration_to_error(migration, reason % locals(),
instance=instance)
continue
try:
@@ -2733,14 +3047,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _instance_usage_audit(self, context):
- if FLAGS.instance_usage_audit:
+ if CONF.instance_usage_audit:
if not compute_utils.has_audit_been_run(context, self.host):
begin, end = utils.last_completed_audit_period()
- instances = self.db.instance_get_active_by_window_joined(
- context,
- begin,
- end,
- host=self.host)
+ capi = self.conductor_api
+ instances = capi.instance_get_active_by_window_joined(
+ context, begin, end, host=self.host)
num_instances = len(instances)
errors = 0
successes = 0
@@ -2783,11 +3095,12 @@ class ComputeManager(manager.SchedulerDependentManager):
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
- FLAGS.bandwidth_poll_interval):
+ CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
- instances = self.db.instance_get_all_by_host(context, self.host)
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
@@ -2804,20 +3117,19 @@ class ComputeManager(manager.SchedulerDependentManager):
bw_out = 0
last_ctr_in = None
last_ctr_out = None
- usage = self.db.bw_usage_get(context,
- bw_ctr['uuid'],
- start_time,
- bw_ctr['mac_address'])
+ usage = self.conductor_api.bw_usage_get(context,
+ bw_ctr['uuid'],
+ start_time,
+ bw_ctr['mac_address'])
if usage:
bw_in = usage['bw_in']
bw_out = usage['bw_out']
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
else:
- usage = self.db.bw_usage_get(context,
- bw_ctr['uuid'],
- prev_time,
- bw_ctr['mac_address'])
+ usage = self.conductor_api.bw_usage_get(
+ context, bw_ctr['uuid'], prev_time,
+ bw_ctr['mac_address'])
if usage:
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
@@ -2836,29 +3148,101 @@ class ComputeManager(manager.SchedulerDependentManager):
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
- self.db.bw_usage_update(context,
- bw_ctr['uuid'],
- bw_ctr['mac_address'],
- start_time,
- bw_in,
- bw_out,
- bw_ctr['bw_in'],
- bw_ctr['bw_out'],
- last_refreshed=refreshed)
+ self.conductor_api.bw_usage_update(context,
+ bw_ctr['uuid'],
+ bw_ctr['mac_address'],
+ start_time,
+ bw_in,
+ bw_out,
+ bw_ctr['bw_in'],
+ bw_ctr['bw_out'],
+ last_refreshed=refreshed)
+
+ def _get_host_volume_bdms(self, context, host):
+ """Return all block device mappings on a compute host."""
+ compute_host_bdms = []
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
+ for instance in instances:
+ instance_bdms = self._get_instance_volume_bdms(context, instance)
+ compute_host_bdms.append(dict(instance=instance,
+ instance_bdms=instance_bdms))
+
+ return compute_host_bdms
+
+ def _update_volume_usage_cache(self, context, vol_usages, refreshed):
+ """Updates the volume usage cache table with a list of stats."""
+ for usage in vol_usages:
+ # Allow switching of greenthreads between queries.
+ greenthread.sleep(0)
+ self.conductor_api.vol_usage_update(context, usage['volume'],
+ usage['rd_req'],
+ usage['rd_bytes'],
+ usage['wr_req'],
+ usage['wr_bytes'],
+ usage['instance'],
+ last_refreshed=refreshed)
+
+ def _send_volume_usage_notifications(self, context, start_time):
+ """Queries vol usage cache table and sends a vol usage notification."""
+ # We might have had a quick attach/detach that we missed in
+ # the last run of get_all_volume_usage and this one
+ # but detach stats will be recorded in db and returned from
+ # vol_get_usage_by_time
+ vol_usages = self.conductor_api.vol_get_usage_by_time(context,
+ start_time)
+ for vol_usage in vol_usages:
+ notifier.notify(context, 'volume.%s' % self.host, 'volume.usage',
+ notifier.INFO,
+ compute_utils.usage_volume_info(vol_usage))
+
+ @manager.periodic_task
+ def _poll_volume_usage(self, context, start_time=None):
+ if CONF.volume_usage_poll_interval == 0:
+ return
+ else:
+ if not start_time:
+ start_time = utils.last_completed_audit_period()[1]
+
+ curr_time = time.time()
+ if (curr_time - self._last_vol_usage_poll) < \
+ CONF.volume_usage_poll_interval:
+ return
+ else:
+ self._last_vol_usage_poll = curr_time
+ compute_host_bdms = self._get_host_volume_bdms(context,
+ self.host)
+ if not compute_host_bdms:
+ return
+ else:
+ LOG.debug(_("Updating volume usage cache"))
+ try:
+ vol_usages = self.driver.get_all_volume_usage(context,
+ compute_host_bdms)
+ except NotImplementedError:
+ return
+
+ refreshed = timeutils.utcnow()
+ self._update_volume_usage_cache(context, vol_usages,
+ refreshed)
+
+ self._send_volume_usage_notifications(context, start_time)
@manager.periodic_task
def _report_driver_status(self, context):
curr_time = time.time()
- if curr_time - self._last_host_check > FLAGS.host_state_interval:
+ if curr_time - self._last_host_check > CONF.host_state_interval:
self._last_host_check = curr_time
LOG.info(_("Updating host status"))
# This will grab info about the host and queue it
# to be sent to the Schedulers.
capabilities = self.driver.get_host_stats(refresh=True)
- capabilities['host_ip'] = FLAGS.my_ip
+ for capability in (capabilities if isinstance(capabilities, list)
+ else [capabilities]):
+ capability['host_ip'] = CONF.my_ip
self.update_service_capabilities(capabilities)
- @manager.periodic_task(ticks_between_runs=10)
+ @manager.periodic_task(spacing=600.0)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
@@ -2871,7 +3255,8 @@ class ComputeManager(manager.SchedulerDependentManager):
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
- db_instances = self.db.instance_get_all_by_host(context, self.host)
+ db_instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
@@ -2891,13 +3276,13 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
- vm_power_state = power_state.NOSTATE
+ vm_power_state = power_state.SHUTDOWN
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
- u = self.db.instance_get_by_uuid(context,
- db_instance['uuid'])
+ u = self.conductor_api.instance_get_by_uuid(context,
+ db_instance['uuid'])
db_power_state = u["power_state"]
vm_state = u['vm_state']
if self.host != u['host']:
@@ -2942,9 +3327,8 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
- if vm_power_state in (power_state.NOSTATE,
- power_state.SHUTDOWN,
- power_state.CRASHED):
+ if vm_power_state in (power_state.SHUTDOWN,
+ power_state.CRASHED):
LOG.warn(_("Instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
@@ -2997,21 +3381,23 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
- interval = FLAGS.reclaim_instance_interval
+ interval = CONF.reclaim_instance_interval
if interval <= 0:
- LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
+ LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
- instances = self.db.instance_get_all_by_host(context, self.host)
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
for instance in instances:
- old_enough = (not instance.deleted_at or
- timeutils.is_older_than(instance.deleted_at,
+ old_enough = (not instance['deleted_at'] or
+ timeutils.is_older_than(instance['deleted_at'],
interval))
- soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
+ soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
if soft_deleted and old_enough:
- bdms = self.db.block_device_mapping_get_all_by_instance(
- context, instance['uuid'])
+ capi = self.conductor_api
+ bdms = capi.block_device_mapping_get_all_by_instance(
+ context, instance)
LOG.info(_('Reclaiming deleted instance'), instance=instance)
self._delete_instance(context, instance, bdms)
@@ -3024,10 +3410,15 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
"""
- self.resource_tracker.update_available_resource(context)
-
- @manager.periodic_task(
- ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
+ new_resource_tracker_dict = {}
+ nodenames = self.driver.get_available_nodes()
+ for nodename in nodenames:
+ rt = self._get_resource_tracker(nodename)
+ rt.update_available_resource(context)
+ new_resource_tracker_dict[nodename] = rt
+ self._resource_tracker_dict = new_resource_tracker_dict
+
+ @manager.periodic_task(spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
@@ -3048,7 +3439,7 @@ class ComputeManager(manager.SchedulerDependentManager):
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
- action = FLAGS.running_deleted_instance_action
+ action = CONF.running_deleted_instance_action
if action == "noop":
return
@@ -3056,8 +3447,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
- bdms = self.db.block_device_mapping_get_all_by_instance(
- context, instance['uuid'])
+ capi = self.conductor_api
+ bdms = capi.block_device_mapping_get_all_by_instance(
+ context, instance)
if action == "log":
name = instance['name']
@@ -3076,7 +3468,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._cleanup_volumes(context, instance['uuid'], bdms)
else:
raise Exception(_("Unrecognized value '%(action)s'"
- " for FLAGS.running_deleted_"
+ " for CONF.running_deleted_"
"instance_action"), locals(),
instance=instance)
@@ -3086,17 +3478,18 @@ class ComputeManager(manager.SchedulerDependentManager):
should be pushed down to the virt layer for efficiency.
"""
def deleted_instance(instance):
- timeout = FLAGS.running_deleted_instance_timeout
- present = instance.name in present_name_labels
- erroneously_running = instance.deleted and present
- old_enough = (not instance.deleted_at or
- timeutils.is_older_than(instance.deleted_at,
+ timeout = CONF.running_deleted_instance_timeout
+ present = instance['name'] in present_name_labels
+ erroneously_running = instance['deleted'] and present
+ old_enough = (not instance['deleted_at'] or
+ timeutils.is_older_than(instance['deleted_at'],
timeout))
if erroneously_running and old_enough:
return True
return False
present_name_labels = set(self.driver.list_instances())
- instances = self.db.instance_get_all_by_host(context, self.host)
+ instances = self.conductor_api.instance_get_all_by_host(context,
+ self.host)
return [i for i in instances if deleted_instance(i)]
@contextlib.contextmanager
@@ -3116,23 +3509,24 @@ class ComputeManager(manager.SchedulerDependentManager):
aggregate=None, aggregate_id=None):
"""Notify hypervisor of change (for hypervisor pools)."""
if not aggregate:
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ aggregate = self.conductor_api.aggregate_get(context, aggregate_id)
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except exception.AggregateError:
with excutils.save_and_reraise_exception():
- self.driver.undo_aggregate_operation(context,
- self.db.aggregate_host_delete,
- aggregate['id'], host)
+ self.driver.undo_aggregate_operation(
+ context,
+ self.conductor_api.aggregate_host_delete,
+ aggregate, host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_aggregate_host(self, context, host, slave_info=None,
aggregate=None, aggregate_id=None):
"""Removes a host from a physical hypervisor pool."""
if not aggregate:
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ aggregate = self.conductor_api.aggregate_get(context, aggregate_id)
try:
self.driver.remove_from_aggregate(context, aggregate, host,
@@ -3141,19 +3535,35 @@ class ComputeManager(manager.SchedulerDependentManager):
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
- context, self.db.aggregate_host_add,
- aggregate['id'], host,
+ context,
+ self.conductor_api.aggregate_host_add,
+ aggregate, host,
isinstance(e, exception.AggregateError))
- @manager.periodic_task(
- ticks_between_runs=FLAGS.image_cache_manager_interval)
+ @manager.periodic_task(spacing=CONF.image_cache_manager_interval,
+ external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
- if FLAGS.image_cache_manager_interval == 0:
+ if CONF.image_cache_manager_interval == 0:
return
- all_instances = self.db.instance_get_all(context)
- self.driver.manage_image_cache(context, all_instances)
+ all_instances = self.conductor_api.instance_get_all(context)
+
+ # Determine what other nodes use this storage
+ storage_users.register_storage_use(CONF.instances_path, CONF.host)
+ nodes = storage_users.get_storage_users(CONF.instances_path)
+
+ # Filter all_instances to only include those nodes which share this
+ # storage path.
+ # TODO(mikal): this should be further refactored so that the cache
+ # cleanup code doesn't know what those instances are, just a remote
+ # count, and then this logic should be pushed up the stack.
+ filtered_instances = []
+ for instance in all_instances:
+ if instance['host'] in nodes:
+ filtered_instances.append(instance)
+
+ self.driver.manage_image_cache(context, filtered_instances)
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index e4a65c081..f3c3ae7a3 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -20,17 +20,18 @@ model.
"""
from nova.compute import claims
+from nova.compute import instance_types
+from nova.compute import task_states
from nova.compute import vm_states
+from nova import conductor
from nova import context
from nova import db
from nova import exception
-from nova import flags
-from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
-from nova import utils
resource_tracker_opts = [
cfg.IntOpt('reserved_host_disk_mb', default=0,
@@ -42,8 +43,8 @@ resource_tracker_opts = [
help='Class that will manage stats for the local compute host')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(resource_tracker_opts)
+CONF = cfg.CONF
+CONF.register_opts(resource_tracker_opts)
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE
@@ -54,12 +55,15 @@ class ResourceTracker(object):
are built and destroyed.
"""
- def __init__(self, host, driver):
+ def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
+ self.nodename = nodename
self.compute_node = None
- self.stats = importutils.import_object(FLAGS.compute_stats_class)
+ self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
+ self.tracked_migrations = {}
+ self.conductor_api = conductor.API()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def instance_claim(self, context, instance_ref, limits=None):
@@ -79,14 +83,18 @@ class ResourceTracker(object):
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
- # set the 'host' field and continue the build:
- instance_ref = self._set_instance_host(context,
- instance_ref['uuid'])
+ # set the 'host' and node fields and continue the build:
+ self._set_instance_host_and_node(context, instance_ref)
return claims.NopClaim()
- # sanity check:
+ # sanity checks:
if instance_ref['host']:
- LOG.warning(_("Host field should be not be set on the instance "
+ LOG.warning(_("Host field should not be set on the instance until "
+ "resources have been claimed."),
+ instance=instance_ref)
+
+ if instance_ref['node']:
+            LOG.warning(_("Node field should not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
@@ -94,8 +102,7 @@ class ResourceTracker(object):
if claim.test(self.compute_node, limits):
- instance_ref = self._set_instance_host(context,
- instance_ref['uuid'])
+ self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(self.compute_node, instance_ref)
@@ -108,19 +115,80 @@ class ResourceTracker(object):
else:
raise exception.ComputeResourcesUnavailable()
- def _set_instance_host(self, context, instance_uuid):
- """Tag the instance as belonging to this host. This should be done
- while the COMPUTE_RESOURCES_SEMPAHORE is being held so the resource
+ @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
+ def resize_claim(self, context, instance_ref, instance_type, limits=None):
+ """Indicate that resources are needed for a resize operation to this
+ compute host.
+ :param context: security context
+ :param instance_ref: instance to reserve resources for
+ :param instance_type: new instance_type being resized to
+ :param limits: Dict of oversubscription limits for memory, disk,
+ and CPUs.
+ :returns: A Claim ticket representing the reserved resources. This
+                  should be used to finalize a resource claim or free
+ resources after the compute operation is finished.
+ """
+ if self.disabled:
+ # compute_driver doesn't support resource tracking, just
+ # generate the migration record and continue the resize:
+ migration_ref = self._create_migration(context, instance_ref,
+ instance_type)
+ return claims.NopClaim(migration=migration_ref)
+
+ claim = claims.ResizeClaim(instance_ref, instance_type, self)
+
+ if claim.test(self.compute_node, limits):
+
+ migration_ref = self._create_migration(context, instance_ref,
+ instance_type)
+ claim.migration = migration_ref
+
+ # Mark the resources in-use for the resize landing on this
+ # compute host:
+ self._update_usage_from_migration(self.compute_node, migration_ref)
+ elevated = context.elevated()
+ self._update(elevated, self.compute_node)
+
+ return claim
+
+ else:
+ raise exception.ComputeResourcesUnavailable()
+
+ def _create_migration(self, context, instance, instance_type):
+ """Create a migration record for the upcoming resize. This should
+        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
- values = {'host': self.host, 'launched_on': self.host}
- (old_ref, instance_ref) = db.instance_update_and_get_original(context,
- instance_uuid, values)
- notifications.send_update(context, old_ref, instance_ref)
- return instance_ref
+ # TODO(russellb): no-db-compute: Send the old instance type
+ # info that is needed via rpc so db access isn't required
+ # here.
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ return self.conductor_api.migration_create(context, instance,
+ {'dest_compute': self.host,
+ 'dest_node': self.nodename,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': instance_type['id'],
+ 'status': 'pre-migrating'})
+
+ def _set_instance_host_and_node(self, context, instance_ref):
+ """Tag the instance as belonging to this host. This should be done
+        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
+ will not be lost if the audit process starts.
+ """
+ values = {'host': self.host, 'node': self.nodename,
+ 'launched_on': self.host}
+ self.conductor_api.instance_update(context, instance_ref['uuid'],
+ **values)
+ instance_ref['host'] = self.host
+ instance_ref['launched_on'] = self.host
+ instance_ref['node'] = self.nodename
def abort_instance_claim(self, instance):
- """Remove usage from the given instance"""
+ """Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
@@ -129,6 +197,18 @@ class ResourceTracker(object):
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
+ def abort_resize_claim(self, instance_uuid, instance_type):
+ """Remove usage for an incoming migration."""
+ if instance_uuid in self.tracked_migrations:
+ migration, itype = self.tracked_migrations.pop(instance_uuid)
+
+ if instance_type['id'] == migration['new_instance_type_id']:
+ self.stats.update_stats_for_migration(itype, sign=-1)
+ self._update_usage(self.compute_node, itype, sign=-1)
+
+ ctxt = context.get_admin_context()
+ self._update(ctxt, self.compute_node)
+
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def update_usage(self, context, instance):
"""Update the resource usage and stats after a change in an
@@ -137,9 +217,10 @@ class ResourceTracker(object):
if self.disabled:
return
+ uuid = instance['uuid']
+
# don't update usage for this instance unless it submitted a resource
# claim first:
- uuid = instance['uuid']
if uuid in self.tracked_instances:
self._update_usage_from_instance(self.compute_node, instance)
self._update(context.elevated(), self.compute_node)
@@ -157,7 +238,8 @@ class ResourceTracker(object):
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
- resources = self.driver.get_available_resource()
+ LOG.audit(_("Auditing locally available compute resources"))
+ resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.audit(_("Virt driver does not support "
@@ -169,17 +251,30 @@ class ResourceTracker(object):
self._report_hypervisor_resource_view(resources)
- # Grab all instances assigned to this host:
- instances = db.instance_get_all_by_host(context, self.host)
+ # Grab all instances assigned to this node:
+ instances = db.instance_get_all_by_host_and_node(context, self.host,
+ self.nodename)
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(resources, instances)
+
+ # Grab all in-progress migrations:
+ migrations = db.migration_get_in_progress_by_host_and_node(context,
+ self.host, self.nodename)
+
+ self._update_usage_from_migrations(resources, migrations)
+
+ # Detect and account for orphaned instances that may exist on the
+ # hypervisor, but are not in the DB:
+ orphans = self._find_orphaned_instances()
+ self._update_usage_from_orphans(resources, orphans)
+
self._report_final_resource_view(resources)
self._sync_compute_node(context, resources)
def _sync_compute_node(self, context, resources):
- """Create or update the compute node DB record"""
+ """Create or update the compute node DB record."""
if not self.compute_node:
# we need a copy of the ComputeNode record:
service = self._get_service(context)
@@ -187,9 +282,12 @@ class ResourceTracker(object):
# no service record, disable resource
return
- compute_node_ref = service['compute_node']
- if compute_node_ref:
- self.compute_node = compute_node_ref[0]
+ compute_node_refs = service['compute_node']
+ if compute_node_refs:
+ for cn in compute_node_refs:
+ if cn.get('hypervisor_hostname') == self.nodename:
+ self.compute_node = cn
+ break
if not self.compute_node:
# Need to create the ComputeNode record:
@@ -203,15 +301,14 @@ class ResourceTracker(object):
LOG.info(_('Compute_service record updated for %s ') % self.host)
def _create(self, context, values):
- """Create the compute node in the DB"""
+ """Create the compute node in the DB."""
# initialize load stats from existing instances:
compute_node = db.compute_node_create(context, values)
self.compute_node = dict(compute_node)
def _get_service(self, context):
try:
- return db.service_get_all_compute_by_host(context,
- self.host)[0]
+ return db.service_get_by_compute_host(context, self.host)
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
@@ -249,11 +346,115 @@ class ResourceTracker(object):
LOG.audit(_("Free VCPU information unavailable"))
def _update(self, context, values, prune_stats=False):
- """Persist the compute node updates to the DB"""
+ """Persist the compute node updates to the DB."""
compute_node = db.compute_node_update(context,
self.compute_node['id'], values, prune_stats)
self.compute_node = dict(compute_node)
+ def confirm_resize(self, context, migration, status='confirmed'):
+ """Cleanup usage for a confirmed resize."""
+ elevated = context.elevated()
+ self.conductor_api.migration_update(elevated, migration, status)
+ self.update_available_resource(elevated)
+
+ def revert_resize(self, context, migration, status='reverted'):
+ """Cleanup usage for a reverted resize."""
+ self.confirm_resize(context, migration, status)
+
+ def _update_usage(self, resources, usage, sign=1):
+ resources['memory_mb_used'] += sign * usage['memory_mb']
+ resources['local_gb_used'] += sign * usage.get('root_gb', 0)
+ resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
+
+ # free ram and disk may be negative, depending on policy:
+ resources['free_ram_mb'] = (resources['memory_mb'] -
+ resources['memory_mb_used'])
+ resources['free_disk_gb'] = (resources['local_gb'] -
+ resources['local_gb_used'])
+
+ resources['running_vms'] = self.stats.num_instances
+ resources['vcpus_used'] = self.stats.num_vcpus_used
+
+ def _update_usage_from_migration(self, resources, migration):
+ """Update usage for a single migration. The record may
+ represent an incoming or outbound migration.
+ """
+ uuid = migration['instance_uuid']
+ LOG.audit(_("Updating from migration %s") % uuid)
+
+ incoming = (migration['dest_compute'] == self.host and
+ migration['dest_node'] == self.nodename)
+ outbound = (migration['source_compute'] == self.host and
+ migration['source_node'] == self.nodename)
+ same_node = (incoming and outbound)
+
+ instance = self.tracked_instances.get(uuid, None)
+ itype = None
+
+ if same_node:
+ # same node resize. record usage for whichever instance type the
+ # instance is *not* in:
+ if (instance['instance_type_id'] ==
+ migration['old_instance_type_id']):
+
+ itype = migration['new_instance_type_id']
+ else:
+ # instance record already has new flavor, hold space for a
+ # possible revert to the old instance type:
+ itype = migration['old_instance_type_id']
+
+ elif incoming and not instance:
+ # instance has not yet migrated here:
+ itype = migration['new_instance_type_id']
+
+ elif outbound and not instance:
+ # instance migrated, but record usage for a possible revert:
+ itype = migration['old_instance_type_id']
+
+ if itype:
+ instance_type = instance_types.get_instance_type(itype)
+ self.stats.update_stats_for_migration(instance_type)
+ self._update_usage(resources, instance_type)
+ resources['stats'] = self.stats
+ self.tracked_migrations[uuid] = (migration, instance_type)
+
+ def _update_usage_from_migrations(self, resources, migrations):
+
+ self.tracked_migrations.clear()
+
+ filtered = {}
+
+ # do some defensive filtering against bad migrations records in the
+ # database:
+ for migration in migrations:
+
+ instance = migration['instance']
+
+ if not instance:
+ # migration referencing deleted instance
+ continue
+
+ uuid = instance['uuid']
+
+ # skip migration if instance isn't in a resize state:
+ if not self._instance_in_resize_state(instance):
+ LOG.warn(_("Instance not resizing, skipping migration."),
+ instance_uuid=uuid)
+ continue
+
+ # filter to most recently updated migration for each instance:
+ m = filtered.get(uuid, None)
+ if not m or migration['updated_at'] >= m['updated_at']:
+ filtered[uuid] = migration
+
+ for migration in filtered.values():
+ try:
+ self._update_usage_from_migration(resources, migration)
+ except exception.InstanceTypeNotFound:
+ LOG.warn(_("InstanceType could not be found, skipping "
+ "migration."), instance_uuid=uuid)
+ continue
+
def _update_usage_from_instance(self, resources, instance):
"""Update usage for a single instance."""
@@ -262,7 +463,7 @@ class ResourceTracker(object):
is_deleted_instance = instance['vm_state'] == vm_states.DELETED
if is_new_instance:
- self.tracked_instances[uuid] = 1
+ self.tracked_instances[uuid] = jsonutils.to_primitive(instance)
sign = 1
if is_deleted_instance:
@@ -274,18 +475,7 @@ class ResourceTracker(object):
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
# new instance, update compute node resource usage:
- resources['memory_mb_used'] += sign * instance['memory_mb']
- resources['local_gb_used'] += sign * instance['root_gb']
- resources['local_gb_used'] += sign * instance['ephemeral_gb']
-
- # free ram and disk may be negative, depending on policy:
- resources['free_ram_mb'] = (resources['memory_mb'] -
- resources['memory_mb_used'])
- resources['free_disk_gb'] = (resources['local_gb'] -
- resources['local_gb_used'])
-
- resources['running_vms'] = self.stats.num_instances
- resources['vcpus_used'] = self.stats.num_vcpus_used
+ self._update_usage(resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
resources['stats'] = self.stats
@@ -302,8 +492,8 @@ class ResourceTracker(object):
self.stats.clear()
# set some intiial values, reserve room for host/hypervisor:
- resources['local_gb_used'] = FLAGS.reserved_host_disk_mb / 1024
- resources['memory_mb_used'] = FLAGS.reserved_host_memory_mb
+ resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
+ resources['memory_mb_used'] = CONF.reserved_host_memory_mb
resources['vcpus_used'] = 0
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
@@ -313,7 +503,44 @@ class ResourceTracker(object):
resources['running_vms'] = 0
for instance in instances:
- self._update_usage_from_instance(resources, instance)
+ if instance['vm_state'] == vm_states.DELETED:
+ continue
+ else:
+ self._update_usage_from_instance(resources, instance)
+
+ def _find_orphaned_instances(self):
+        """Given the set of instances and migrations already accounted for
+ by resource tracker, sanity check the hypervisor to determine
+ if there are any "orphaned" instances left hanging around.
+
+ Orphans could be consuming memory and should be accounted for in
+ usage calculations to guard against potential out of memory
+ errors.
+ """
+ uuids1 = frozenset(self.tracked_instances.keys())
+ uuids2 = frozenset(self.tracked_migrations.keys())
+ uuids = uuids1 | uuids2
+
+ usage = self.driver.get_per_instance_usage()
+ vuuids = frozenset(usage.keys())
+
+ orphan_uuids = vuuids - uuids
+ orphans = [usage[uuid] for uuid in orphan_uuids]
+
+ return orphans
+
+ def _update_usage_from_orphans(self, resources, orphans):
+ """Include orphaned instances in usage."""
+ for orphan in orphans:
+ uuid = orphan['uuid']
+ memory_mb = orphan['memory_mb']
+
+ LOG.warn(_("Detected running orphan instance: %(uuid)s (consuming "
+                       "%(memory_mb)s MB memory)") % locals())
+
+ # just record memory usage for the orphan
+ usage = {'memory_mb': orphan['memory_mb']}
+ self._update_usage(resources, usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
@@ -323,3 +550,17 @@ class ResourceTracker(object):
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
+
+ def _instance_in_resize_state(self, instance):
+ vm = instance['vm_state']
+ task = instance['task_state']
+
+ if vm == vm_states.RESIZED:
+ return True
+
+ if (vm == vm_states.ACTIVE and task in [task_states.RESIZE_PREP,
+ task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH]):
+ return True
+
+ return False
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 2ec4a6736..3e7ed1cfd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -19,13 +19,19 @@ Client side of the compute RPC API.
"""
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
import nova.openstack.common.rpc.proxy
+rpcapi_opts = [
+ cfg.StrOpt('compute_topic',
+ default='compute',
+ help='the topic compute nodes listen on'),
+]
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
def _compute_topic(topic, ctxt, host, instance):
@@ -144,6 +150,14 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
2.16 - Add instance_type to resize_instance
+ 2.17 - Add get_backdoor_port()
+ 2.18 - Add bdms to rebuild_instance
+ 2.19 - Add node to run_instance
+ 2.20 - Add node to prep_resize
+ 2.21 - Add migrate_data dict param to pre_live_migration()
+ 2.22 - Add recreate, on_shared_storage and host arguments to
+ rebuild_instance()
+ 2.23 - Remove network_info from reboot_instance
'''
#
@@ -158,7 +172,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def __init__(self):
super(ComputeAPI, self).__init__(
- topic=FLAGS.compute_topic,
+ topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
@@ -211,10 +225,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
instance_p = jsonutils.to_primitive(instance)
- self.call(ctxt, self.make_msg('check_can_live_migrate_source',
- instance=instance_p,
- dest_check_data=dest_check_data),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ return self.call(ctxt, self.make_msg('check_can_live_migrate_source',
+ instance=instance_p,
+ dest_check_data=dest_check_data),
+ topic=_compute_topic(self.topic, ctxt, None,
+ instance))
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
@@ -346,46 +361,52 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, None, instance))
def pre_live_migration(self, ctxt, instance, block_migration, disk,
- host):
+ host, migrate_data=None):
instance_p = jsonutils.to_primitive(instance)
return self.call(ctxt, self.make_msg('pre_live_migration',
- instance=instance_p, block_migration=block_migration,
- disk=disk), _compute_topic(self.topic, ctxt, host, None))
+ instance=instance_p,
+ block_migration=block_migration,
+ disk=disk, migrate_data=migrate_data),
+ _compute_topic(self.topic, ctxt, host, None),
+ version='2.21')
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
- filter_properties=None):
+ filter_properties=None, node=None):
instance_p = jsonutils.to_primitive(instance)
instance_type_p = jsonutils.to_primitive(instance_type)
self.cast(ctxt, self.make_msg('prep_resize',
instance=instance_p, instance_type=instance_type_p,
image=image, reservations=reservations,
request_spec=request_spec,
- filter_properties=filter_properties),
+ filter_properties=filter_properties,
+ node=node),
_compute_topic(self.topic, ctxt, host, None),
- version='2.10')
+ version='2.20')
- def reboot_instance(self, ctxt, instance,
- block_device_info, network_info, reboot_type):
+ def reboot_instance(self, ctxt, instance, block_device_info,
+ reboot_type):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('reboot_instance',
instance=instance_p,
block_device_info=block_device_info,
- network_info=network_info,
reboot_type=reboot_type),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.5')
+ version='2.23')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
- image_ref, orig_image_ref, orig_sys_metadata):
+ image_ref, orig_image_ref, orig_sys_metadata, bdms,
+ recreate=False, on_shared_storage=False, host=None):
instance_p = jsonutils.to_primitive(instance)
+ bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('rebuild_instance',
instance=instance_p, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata),
- topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.1')
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
+ recreate=recreate, on_shared_storage=on_shared_storage),
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.22')
def refresh_provider_fw_rules(self, ctxt, host):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
@@ -471,14 +492,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
- is_first_time):
+ is_first_time, node=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('run_instance', instance=instance_p,
request_spec=request_spec, filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files, admin_password=admin_password,
- is_first_time=is_first_time),
- topic=_compute_topic(self.topic, ctxt, host, None))
+ is_first_time=is_first_time, node=node),
+ topic=_compute_topic(self.topic, ctxt, host, None),
+ version='2.19')
def set_admin_password(self, ctxt, instance, new_pass):
instance_p = jsonutils.to_primitive(instance)
@@ -503,7 +525,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.3')
def snapshot_instance(self, ctxt, instance, image_id, image_type,
- backup_type, rotation):
+ backup_type=None, rotation=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('snapshot_instance',
instance=instance_p, image_id=image_id,
@@ -550,6 +572,10 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ topic=_compute_topic(self.topic, ctxt, host, None))
+
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
@@ -589,7 +615,7 @@ class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def __init__(self):
super(SecurityGroupAPI, self).__init__(
- topic=FLAGS.compute_topic,
+ topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
def refresh_security_group_rules(self, ctxt, security_group_id, host):
diff --git a/nova/compute/stats.py b/nova/compute/stats.py
index 062fac59f..b6a30d38f 100644
--- a/nova/compute/stats.py
+++ b/nova/compute/stats.py
@@ -33,7 +33,7 @@ class Stats(dict):
@property
def io_workload(self):
- """Calculate an I/O based load by counting I/O heavy operations"""
+ """Calculate an I/O based load by counting I/O heavy operations."""
def _get(state, state_type):
key = "num_%s_%s" % (state_type, state)
@@ -114,6 +114,10 @@ class Stats(dict):
# save updated I/O workload in stats:
self["io_workload"] = self.io_workload
+ def update_stats_for_migration(self, instance_type, sign=1):
+ x = self.get("num_vcpus_used", 0)
+ self["num_vcpus_used"] = x + (sign * instance_type['vcpus'])
+
def _decrement(self, key):
x = self.get(key, 0)
self[key] = x - 1
@@ -123,7 +127,7 @@ class Stats(dict):
self[key] = x + 1
def _extract_state_from_instance(self, instance):
- """Save the useful bits of instance state for tracking purposes"""
+ """Save the useful bits of instance state for tracking purposes."""
uuid = instance['uuid']
vm_state = instance['vm_state']
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index c2966d554..8e2b8344a 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -33,6 +33,8 @@ SPAWNING = 'spawning'
# possible task states during snapshot()
IMAGE_SNAPSHOT = 'image_snapshot'
+IMAGE_PENDING_UPLOAD = 'image_pending_upload'
+IMAGE_UPLOADING = 'image_uploading'
# possible task states during backup()
IMAGE_BACKUP = 'image_backup'
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 4a284be64..0c475d082 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -24,17 +24,26 @@ from nova import block_device
from nova.compute import instance_types
from nova import db
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova import notifications
+from nova.openstack.common import cfg
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
from nova import utils
+from nova.virt import driver
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
+def metadata_to_dict(metadata):
+ result = {}
+ for item in metadata:
+ result[item['key']] = item['value']
+ return result
+
+
def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None):
"""Adds the specified fault to the database."""
@@ -86,7 +95,7 @@ def get_device_name_for_instance(context, instance, device):
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=mappings['root'])
# NOTE(vish): remove this when xenapi is setting default_root_device
- if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+ if driver.compute_driver_matches('xenapi.XenAPIDriver'):
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())
@@ -101,7 +110,7 @@ def get_device_name_for_instance(context, instance, device):
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
- if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+ if driver.compute_driver_matches('xenapi.XenAPIDriver'):
instance_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(instance_type_id)
if instance_type['ephemeral_gb']:
@@ -151,11 +160,7 @@ def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data)
if system_metadata is None:
- try:
- system_metadata = db.instance_system_metadata_get(
- context, instance_ref['uuid'])
- except exception.NotFound:
- system_metadata = {}
+ system_metadata = metadata_to_dict(instance_ref['system_metadata'])
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
@@ -184,11 +189,11 @@ def notify_about_instance_usage(context, instance, event_suffix,
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
:param host: Compute host for the instance, if specified. Default is
- FLAGS.host
+ CONF.host
"""
if not host:
- host = FLAGS.host
+ host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
@@ -225,3 +230,26 @@ def start_instance_usage_audit(context, begin, end, host, num_instances):
def finish_instance_usage_audit(context, begin, end, host, errors, message):
db.task_log_end_task(context, "instance_usage_audit", begin, end, host,
errors, message)
+
+
+def usage_volume_info(vol_usage):
+ def null_safe_str(s):
+ return str(s) if s else ''
+
+ tot_refreshed = vol_usage['tot_last_refreshed']
+ curr_refreshed = vol_usage['curr_last_refreshed']
+ last_refreshed_time = (tot_refreshed if tot_refreshed > curr_refreshed
+ else curr_refreshed)
+
+ usage_info = dict(
+ volume_id=vol_usage['volume_id'],
+ instance_id=vol_usage['instance_id'],
+ last_refreshed=null_safe_str(last_refreshed_time),
+ reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
+ read_bytes=vol_usage['tot_read_bytes'] +
+ vol_usage['curr_read_bytes'],
+ writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
+ write_bytes=vol_usage['tot_write_bytes'] +
+ vol_usage['curr_write_bytes'])
+
+ return usage_info
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
new file mode 100644
index 000000000..4c702d037
--- /dev/null
+++ b/nova/conductor/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.conductor import api as conductor_api
+import nova.openstack.common.cfg
+import nova.openstack.common.importutils
+
+
+def API(*args, **kwargs):
+ use_local = kwargs.pop('use_local', False)
+ if nova.openstack.common.cfg.CONF.conductor.use_local or use_local:
+ api = conductor_api.LocalAPI
+ else:
+ api = conductor_api.API
+ return api(*args, **kwargs)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
new file mode 100644
index 000000000..63b64f830
--- /dev/null
+++ b/nova/conductor/api.py
@@ -0,0 +1,520 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles all requests to the conductor service."""
+
+import functools
+
+from nova.conductor import manager
+from nova.conductor import rpcapi
+from nova import exception as exc
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import common as rpc_common
+
+conductor_opts = [
+ cfg.BoolOpt('use_local',
+ default=False,
+ help='Perform nova-conductor operations locally'),
+ cfg.StrOpt('topic',
+ default='conductor',
+ help='the topic conductor nodes listen on'),
+ cfg.StrOpt('manager',
+ default='nova.conductor.manager.ConductorManager',
+ help='full class name for the Manager for conductor'),
+]
+conductor_group = cfg.OptGroup(name='conductor',
+ title='Conductor Options')
+CONF = cfg.CONF
+CONF.register_group(conductor_group)
+CONF.register_opts(conductor_opts, conductor_group)
+
+LOG = logging.getLogger(__name__)
+
+
+class ExceptionHelper(object):
+ """Wrap another object and translate the ClientExceptions raised by its
+ function calls into the original exceptions."""
+
+ def __init__(self, target):
+ self._target = target
+
+ def __getattr__(self, name):
+ func = getattr(self._target, name)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except rpc_common.ClientException, e:
+ raise (e._exc_info[1], None, e._exc_info[2])
+ return wrapper
+
+
+class LocalAPI(object):
+ """A local version of the conductor API that does database updates
+ locally instead of via RPC."""
+
+ def __init__(self):
+ # TODO(danms): This needs to be something more generic for
+ # other/future users of this sort of functionality.
+ self._manager = ExceptionHelper(manager.ConductorManager())
+
+ def wait_until_ready(self, context, *args, **kwargs):
+ # nothing to wait for in the local case.
+ pass
+
+ def ping(self, context, arg, timeout=None):
+ return self._manager.ping(context, arg)
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database."""
+ return self._manager.instance_update(context, instance_uuid, updates)
+
+ def instance_get(self, context, instance_id):
+ return self._manager.instance_get(context, instance_id)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return self._manager.instance_get_by_uuid(context, instance_uuid)
+
+ def instance_destroy(self, context, instance):
+ return self._manager.instance_destroy(context, instance)
+
+ def instance_get_all(self, context):
+ return self._manager.instance_get_all(context)
+
+ def instance_get_all_by_host(self, context, host):
+ return self._manager.instance_get_all_by_host(context, host)
+
+ def instance_get_all_by_filters(self, context, filters,
+ sort_key='created_at',
+ sort_dir='desc'):
+ return self._manager.instance_get_all_by_filters(context,
+ filters,
+ sort_key,
+ sort_dir)
+
+ def instance_get_all_hung_in_rebooting(self, context, timeout):
+ return self._manager.instance_get_all_hung_in_rebooting(context,
+ timeout)
+
+ def instance_get_active_by_window(self, context, begin, end=None,
+ project_id=None, host=None):
+ return self._manager.instance_get_active_by_window(
+ context, begin, end, project_id, host)
+
+ def instance_info_cache_update(self, context, instance, values):
+ return self._manager.instance_info_cache_update(context,
+ instance,
+ values)
+
+ def instance_info_cache_delete(self, context, instance):
+ return self._manager.instance_info_cache_delete(context, instance)
+
+ def instance_type_get(self, context, instance_type_id):
+ return self._manager.instance_type_get(context, instance_type_id)
+
+ def migration_get(self, context, migration_id):
+ return self._manager.migration_get(context, migration_id)
+
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ return self._manager.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+
+ def migration_create(self, context, instance, values):
+ return self._manager.migration_create(context, instance, values)
+
+ def migration_update(self, context, migration, status):
+ return self._manager.migration_update(context, migration, status)
+
+ def aggregate_host_add(self, context, aggregate, host):
+ return self._manager.aggregate_host_add(context, aggregate, host)
+
+ def aggregate_host_delete(self, context, aggregate, host):
+ return self._manager.aggregate_host_delete(context, aggregate, host)
+
+ def aggregate_get(self, context, aggregate_id):
+ return self._manager.aggregate_get(context, aggregate_id)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return self._manager.aggregate_get_by_host(context, host, key)
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ return self._manager.aggregate_metadata_add(context, aggregate,
+ metadata,
+ set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ return self._manager.aggregate_metadata_delete(context,
+ aggregate,
+ key)
+
+ def bw_usage_get(self, context, uuid, start_period, mac):
+ return self._manager.bw_usage_update(context, uuid, mac, start_period)
+
+ def bw_usage_update(self, context, uuid, mac, start_period,
+ bw_in, bw_out, last_ctr_in, last_ctr_out,
+ last_refreshed=None):
+ return self._manager.bw_usage_update(context, uuid, mac, start_period,
+ bw_in, bw_out,
+ last_ctr_in, last_ctr_out,
+ last_refreshed)
+
+ def get_backdoor_port(self, context, host):
+ raise exc.InvalidRequest
+
+ def security_group_get_by_instance(self, context, instance):
+ return self._manager.security_group_get_by_instance(context, instance)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ return self._manager.security_group_rule_get_by_security_group(
+ context, secgroup)
+
+ def provider_fw_rule_get_all(self, context):
+ return self._manager.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self._manager.agent_build_get_by_triple(context, hypervisor,
+ os, architecture)
+
+ def block_device_mapping_create(self, context, values):
+ return self._manager.block_device_mapping_update_or_create(context,
+ values,
+ create=True)
+
+ def block_device_mapping_update(self, context, bdm_id, values):
+ values = dict(values)
+ values['id'] = bdm_id
+ return self._manager.block_device_mapping_update_or_create(
+ context, values, create=False)
+
+ def block_device_mapping_update_or_create(self, context, values):
+ return self._manager.block_device_mapping_update_or_create(context,
+ values)
+
+ def block_device_mapping_get_all_by_instance(self, context, instance):
+ return self._manager.block_device_mapping_get_all_by_instance(
+ context, instance)
+
+ def block_device_mapping_destroy(self, context, bdms):
+ return self._manager.block_device_mapping_destroy(context, bdms=bdms)
+
+ def block_device_mapping_destroy_by_instance_and_device(self, context,
+ instance,
+ device_name):
+ return self._manager.block_device_mapping_destroy(
+ context, instance=instance, device_name=device_name)
+
+ def block_device_mapping_destroy_by_instance_and_volume(self, context,
+ instance,
+ volume_id):
+ return self._manager.block_device_mapping_destroy(
+ context, instance=instance, volume_id=volume_id)
+
+ def vol_get_usage_by_time(self, context, start_time):
+ return self._manager.vol_get_usage_by_time(context, start_time)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ return self._manager.vol_usage_update(context, vol_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance, last_refreshed,
+ update_totals)
+
+ def service_get_all(self, context):
+ return self._manager.service_get_all_by(context)
+
+ def service_get_all_by_topic(self, context, topic):
+ return self._manager.service_get_all_by(context, topic=topic)
+
+ def service_get_all_by_host(self, context, host):
+ return self._manager.service_get_all_by(context, host=host)
+
+ def service_get_by_host_and_topic(self, context, host, topic):
+ return self._manager.service_get_all_by(context, topic, host)
+
+ def service_get_by_compute_host(self, context, host):
+ result = self._manager.service_get_all_by(context, 'compute', host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
+
+ def service_get_by_args(self, context, host, binary):
+ return self._manager.service_get_all_by(context, host=host,
+ binary=binary)
+
+ def action_event_start(self, context, values):
+ return self._manager.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self._manager.action_event_finish(context, values)
+
+ def service_create(self, context, values):
+ return self._manager.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self._manager.service_destroy(context, service_id)
+
+
+class API(object):
+ """Conductor API that does updates via RPC to the ConductorManager."""
+
+ def __init__(self):
+ self.conductor_rpcapi = rpcapi.ConductorAPI()
+
+ def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
+ '''Wait until a conductor service is up and running.
+
+ This method calls the remote ping() method on the conductor topic until
+ it gets a response. It starts with a shorter timeout in the loop
+ (early_timeout) up to early_attempts number of tries. It then drops
+ back to the globally configured timeout for rpc calls for each retry.
+ '''
+ attempt = 0
+ timeout = early_timeout
+ while True:
+ # NOTE(danms): Try early_attempts times with a short timeout, and
+ # then punt to the configured RPC timeout after that
+ if attempt == early_attempts:
+ timeout = None
+ attempt += 1
+
+ # NOTE(russellb): This is running during service startup. If we
+ # allow an exception to be raised, the service will shut down.
+ # This may fail the first time around if nova-conductor wasn't
+ # running when this service started.
+ try:
+ self.ping(context, '1.21 GigaWatts', timeout=timeout)
+ break
+ except rpc_common.Timeout as e:
+ LOG.exception(_('Timed out waiting for nova-conductor. '
+ 'Is it running? Or did this service start '
+ 'before nova-conductor?'))
+
+ def ping(self, context, arg, timeout=None):
+ return self.conductor_rpcapi.ping(context, arg, timeout)
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database."""
+ return self.conductor_rpcapi.instance_update(context, instance_uuid,
+ updates)
+
+ def instance_destroy(self, context, instance):
+ return self.conductor_rpcapi.instance_destroy(context, instance)
+
+ def instance_get(self, context, instance_id):
+ return self.conductor_rpcapi.instance_get(context, instance_id)
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return self.conductor_rpcapi.instance_get_by_uuid(context,
+ instance_uuid)
+
+ def instance_get_all(self, context):
+ return self.conductor_rpcapi.instance_get_all(context)
+
+ def instance_get_all_by_host(self, context, host):
+ return self.conductor_rpcapi.instance_get_all_by_host(context, host)
+
+ def instance_get_all_by_filters(self, context, filters,
+ sort_key='created_at',
+ sort_dir='desc'):
+ return self.conductor_rpcapi.instance_get_all_by_filters(context,
+ filters,
+ sort_key,
+ sort_dir)
+
+ def instance_get_all_hung_in_rebooting(self, context, timeout):
+ return self.conductor_rpcapi.instance_get_all_hung_in_rebooting(
+ context, timeout)
+
+ def instance_get_active_by_window(self, context, begin, end=None,
+ project_id=None, host=None):
+ return self.conductor_rpcapi.instance_get_active_by_window(
+ context, begin, end, project_id, host)
+
+ def instance_info_cache_update(self, context, instance, values):
+ return self.conductor_rpcapi.instance_info_cache_update(context,
+ instance, values)
+
+ def instance_info_cache_delete(self, context, instance):
+ return self.conductor_rpcapi.instance_info_cache_delete(context,
+ instance)
+
+ def instance_type_get(self, context, instance_type_id):
+ return self.conductor_rpcapi.instance_type_get(context,
+ instance_type_id)
+
+ def migration_get(self, context, migration_id):
+ return self.conductor_rpcapi.migration_get(context, migration_id)
+
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ crpcapi = self.conductor_rpcapi
+ return crpcapi.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+
+ def migration_create(self, context, instance, values):
+ return self.conductor_rpcapi.migration_create(context, instance,
+ values)
+
+ def migration_update(self, context, migration, status):
+ return self.conductor_rpcapi.migration_update(context, migration,
+ status)
+
+ def aggregate_host_add(self, context, aggregate, host):
+ return self.conductor_rpcapi.aggregate_host_add(context, aggregate,
+ host)
+
+ def aggregate_host_delete(self, context, aggregate, host):
+ return self.conductor_rpcapi.aggregate_host_delete(context, aggregate,
+ host)
+
+ def aggregate_get(self, context, aggregate_id):
+ return self.conductor_rpcapi.aggregate_get(context, aggregate_id)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return self.conductor_rpcapi.aggregate_get_by_host(context, host, key)
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ return self.conductor_rpcapi.aggregate_metadata_add(context, aggregate,
+ metadata,
+ set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ return self.conductor_rpcapi.aggregate_metadata_delete(context,
+ aggregate,
+ key)
+
+ def bw_usage_get(self, context, uuid, start_period, mac):
+ return self.conductor_rpcapi.bw_usage_update(context, uuid, mac,
+ start_period)
+
+ def bw_usage_update(self, context, uuid, mac, start_period,
+ bw_in, bw_out, last_ctr_in, last_ctr_out,
+ last_refreshed=None):
+ return self.conductor_rpcapi.bw_usage_update(
+ context, uuid, mac, start_period,
+ bw_in, bw_out, last_ctr_in, last_ctr_out,
+ last_refreshed)
+
+ # NOTE(mtreinish): This doesn't work on multiple conductors without any
+ # topic calculation in conductor_rpcapi. So the host param isn't used
+ # currently.
+ def get_backdoor_port(self, context, host):
+ return self.conductor_rpcapi.get_backdoor_port(context)
+
+ def security_group_get_by_instance(self, context, instance):
+ return self.conductor_rpcapi.security_group_get_by_instance(context,
+ instance)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ return self.conductor_rpcapi.security_group_rule_get_by_security_group(
+ context, secgroup)
+
+ def provider_fw_rule_get_all(self, context):
+ return self.conductor_rpcapi.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self.conductor_rpcapi.agent_build_get_by_triple(context,
+ hypervisor,
+ os,
+ architecture)
+
+ def block_device_mapping_create(self, context, values):
+ return self.conductor_rpcapi.block_device_mapping_update_or_create(
+ context, values, create=True)
+
+ def block_device_mapping_update(self, context, bdm_id, values):
+ values = dict(values)
+ values['id'] = bdm_id
+ return self.conductor_rpcapi.block_device_mapping_update_or_create(
+ context, values, create=False)
+
+ def block_device_mapping_update_or_create(self, context, values):
+ return self.conductor_rpcapi.block_device_mapping_update_or_create(
+ context, values)
+
+ def block_device_mapping_get_all_by_instance(self, context, instance):
+ return self.conductor_rpcapi.block_device_mapping_get_all_by_instance(
+ context, instance)
+
+ def block_device_mapping_destroy(self, context, bdms):
+ return self.conductor_rpcapi.block_device_mapping_destroy(context,
+ bdms=bdms)
+
+ def block_device_mapping_destroy_by_instance_and_device(self, context,
+ instance,
+ device_name):
+ return self.conductor_rpcapi.block_device_mapping_destroy(
+ context, instance=instance, device_name=device_name)
+
+ def block_device_mapping_destroy_by_instance_and_volume(self, context,
+ instance,
+ volume_id):
+ return self.conductor_rpcapi.block_device_mapping_destroy(
+ context, instance=instance, volume_id=volume_id)
+
+ def vol_get_usage_by_time(self, context, start_time):
+ return self.conductor_rpcapi.vol_get_usage_by_time(context, start_time)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ return self.conductor_rpcapi.vol_usage_update(context, vol_id,
+ rd_req, rd_bytes,
+ wr_req, wr_bytes,
+ instance, last_refreshed,
+ update_totals)
+
+ def service_get_all(self, context):
+ return self.conductor_rpcapi.service_get_all_by(context)
+
+ def service_get_all_by_topic(self, context, topic):
+ return self.conductor_rpcapi.service_get_all_by(context, topic=topic)
+
+ def service_get_all_by_host(self, context, host):
+ return self.conductor_rpcapi.service_get_all_by(context, host=host)
+
+ def service_get_by_host_and_topic(self, context, host, topic):
+ return self.conductor_rpcapi.service_get_all_by(context, topic, host)
+
+ def service_get_by_compute_host(self, context, host):
+ result = self.conductor_rpcapi.service_get_all_by(context, 'compute',
+ host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
+
+ def service_get_by_args(self, context, host, binary):
+ return self.conductor_rpcapi.service_get_all_by(context, host=host,
+ binary=binary)
+
+ def action_event_start(self, context, values):
+ return self.conductor_rpcapi.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self.conductor_rpcapi.action_event_finish(context, values)
+
+ def service_create(self, context, values):
+ return self.conductor_rpcapi.service_create(context, values)
+
+ def service_destroy(self, context, service_id):
+ return self.conductor_rpcapi.service_destroy(context, service_id)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
new file mode 100644
index 000000000..b0d4011ad
--- /dev/null
+++ b/nova/conductor/manager.py
@@ -0,0 +1,293 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles database requests from other nova services."""
+
+from nova import exception
+from nova import manager
+from nova import notifications
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+
+# Instead of having a huge list of arguments to instance_update(), we just
+# accept a dict of fields to update and use this whitelist to validate it.
+allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
+ 'power_state', 'access_ip_v4', 'access_ip_v6',
+ 'launched_at', 'terminated_at', 'host', 'node',
+ 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
+ 'instance_type_id', 'root_device_name', 'launched_on',
+ 'progress', 'vm_mode', 'default_ephemeral_device',
+ 'default_swap_device', 'root_device_name',
+ ]
+
+# Fields that we want to convert back into a datetime object.
+datetime_fields = ['launched_at', 'terminated_at']
+
+
+class ConductorManager(manager.SchedulerDependentManager):
+ """Mission: TBD."""
+
+ RPC_API_VERSION = '1.30'
+
+ def __init__(self, *args, **kwargs):
+ super(ConductorManager, self).__init__(service_name='conductor',
+ *args, **kwargs)
+
+ def ping(self, context, arg):
+ return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
+
+ @rpc_common.client_exceptions(KeyError, ValueError,
+ exception.InvalidUUID,
+ exception.InstanceNotFound,
+ exception.UnexpectedTaskStateError)
+ def instance_update(self, context, instance_uuid, updates):
+ for key, value in updates.iteritems():
+ if key not in allowed_updates:
+ LOG.error(_("Instance update attempted for "
+ "'%(key)s' on %(instance_uuid)s") % locals())
+ raise KeyError("unexpected update keyword '%s'" % key)
+ if key in datetime_fields and isinstance(value, basestring):
+ updates[key] = timeutils.parse_strtime(value)
+
+ old_ref, instance_ref = self.db.instance_update_and_get_original(
+ context, instance_uuid, updates)
+ notifications.send_update(context, old_ref, instance_ref)
+ return jsonutils.to_primitive(instance_ref)
+
+ @rpc_common.client_exceptions(exception.InstanceNotFound)
+ def instance_get(self, context, instance_id):
+ return jsonutils.to_primitive(
+ self.db.instance_get(context, instance_id))
+
+ @rpc_common.client_exceptions(exception.InstanceNotFound)
+ def instance_get_by_uuid(self, context, instance_uuid):
+ return jsonutils.to_primitive(
+ self.db.instance_get_by_uuid(context, instance_uuid))
+
+ def instance_get_all(self, context):
+ return jsonutils.to_primitive(self.db.instance_get_all(context))
+
+ def instance_get_all_by_host(self, context, host):
+ return jsonutils.to_primitive(
+ self.db.instance_get_all_by_host(context.elevated(), host))
+
+ @rpc_common.client_exceptions(exception.MigrationNotFound)
+ def migration_get(self, context, migration_id):
+ migration_ref = self.db.migration_get(context.elevated(),
+ migration_id)
+ return jsonutils.to_primitive(migration_ref)
+
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ migrations = self.db.migration_get_unconfirmed_by_dest_compute(
+ context, confirm_window, dest_compute)
+ return jsonutils.to_primitive(migrations)
+
+ def migration_create(self, context, instance, values):
+ values.update({'instance_uuid': instance['uuid'],
+ 'source_compute': instance['host'],
+ 'source_node': instance['node']})
+ migration_ref = self.db.migration_create(context.elevated(), values)
+ return jsonutils.to_primitive(migration_ref)
+
+ @rpc_common.client_exceptions(exception.MigrationNotFound)
+ def migration_update(self, context, migration, status):
+ migration_ref = self.db.migration_update(context.elevated(),
+ migration['id'],
+ {'status': status})
+ return jsonutils.to_primitive(migration_ref)
+
+ @rpc_common.client_exceptions(exception.AggregateHostExists)
+ def aggregate_host_add(self, context, aggregate, host):
+ host_ref = self.db.aggregate_host_add(context.elevated(),
+ aggregate['id'], host)
+
+ return jsonutils.to_primitive(host_ref)
+
+ @rpc_common.client_exceptions(exception.AggregateHostNotFound)
+ def aggregate_host_delete(self, context, aggregate, host):
+ self.db.aggregate_host_delete(context.elevated(),
+ aggregate['id'], host)
+
+ @rpc_common.client_exceptions(exception.AggregateNotFound)
+ def aggregate_get(self, context, aggregate_id):
+ aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
+ return jsonutils.to_primitive(aggregate)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ aggregates = self.db.aggregate_get_by_host(context.elevated(),
+ host, key)
+ return jsonutils.to_primitive(aggregates)
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ new_metadata = self.db.aggregate_metadata_add(context.elevated(),
+ aggregate['id'],
+ metadata, set_delete)
+ return jsonutils.to_primitive(new_metadata)
+
+ @rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ self.db.aggregate_metadata_delete(context.elevated(),
+ aggregate['id'], key)
+
+ def bw_usage_update(self, context, uuid, mac, start_period,
+ bw_in=None, bw_out=None,
+ last_ctr_in=None, last_ctr_out=None,
+ last_refreshed=None):
+ if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
+ self.db.bw_usage_update(context, uuid, mac, start_period,
+ bw_in, bw_out, last_ctr_in, last_ctr_out,
+ last_refreshed)
+ usage = self.db.bw_usage_get(context, uuid, start_period, mac)
+ return jsonutils.to_primitive(usage)
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
+
+ def security_group_get_by_instance(self, context, instance):
+ group = self.db.security_group_get_by_instance(context,
+ instance['id'])
+ return jsonutils.to_primitive(group)
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ rule = self.db.security_group_rule_get_by_security_group(
+ context, secgroup['id'])
+ return jsonutils.to_primitive(rule)
+
+ def provider_fw_rule_get_all(self, context):
+ rules = self.db.provider_fw_rule_get_all(context)
+ return jsonutils.to_primitive(rules)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ info = self.db.agent_build_get_by_triple(context, hypervisor, os,
+ architecture)
+ return jsonutils.to_primitive(info)
+
+ def block_device_mapping_update_or_create(self, context, values,
+ create=None):
+ if create is None:
+ self.db.block_device_mapping_update_or_create(context, values)
+ elif create is True:
+ self.db.block_device_mapping_create(context, values)
+ else:
+ self.db.block_device_mapping_update(context, values['id'], values)
+
+ def block_device_mapping_get_all_by_instance(self, context, instance):
+ bdms = self.db.block_device_mapping_get_all_by_instance(
+ context, instance['uuid'])
+ return jsonutils.to_primitive(bdms)
+
+ def block_device_mapping_destroy(self, context, bdms=None,
+ instance=None, volume_id=None,
+ device_name=None):
+ if bdms is not None:
+ for bdm in bdms:
+ self.db.block_device_mapping_destroy(context, bdm['id'])
+ elif instance is not None and volume_id is not None:
+ self.db.block_device_mapping_destroy_by_instance_and_volume(
+ context, instance['uuid'], volume_id)
+ elif instance is not None and device_name is not None:
+ self.db.block_device_mapping_destroy_by_instance_and_device(
+ context, instance['uuid'], device_name)
+ else:
+ # NOTE(danms): This shouldn't happen
+ raise exception.Invalid(_("Invalid block_device_mapping_destroy"
+ " invocation"))
+
+ def instance_get_all_by_filters(self, context, filters, sort_key,
+ sort_dir):
+ result = self.db.instance_get_all_by_filters(context, filters,
+ sort_key, sort_dir)
+ return jsonutils.to_primitive(result)
+
+ def instance_get_all_hung_in_rebooting(self, context, timeout):
+ result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
+ return jsonutils.to_primitive(result)
+
+ def instance_get_active_by_window(self, context, begin, end=None,
+ project_id=None, host=None):
+ result = self.db.instance_get_active_by_window_joined(context,
+ begin, end,
+ project_id,
+ host)
+ return jsonutils.to_primitive(result)
+
+ def instance_destroy(self, context, instance):
+ self.db.instance_destroy(context, instance['uuid'])
+
+ def instance_info_cache_delete(self, context, instance):
+ self.db.instance_info_cache_delete(context, instance['uuid'])
+
+ def instance_info_cache_update(self, context, instance, values):
+ self.db.instance_info_cache_update(context, instance['uuid'],
+ values)
+
+ def instance_type_get(self, context, instance_type_id):
+ result = self.db.instance_type_get(context, instance_type_id)
+ return jsonutils.to_primitive(result)
+
+ def vol_get_usage_by_time(self, context, start_time):
+ result = self.db.vol_get_usage_by_time(context, start_time)
+ return jsonutils.to_primitive(result)
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ self.db.vol_usage_update(context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance['uuid'], last_refreshed,
+ update_totals)
+
+ @rpc_common.client_exceptions(exception.HostBinaryNotFound)
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ if not any((topic, host, binary)):
+ result = self.db.service_get_all(context)
+ elif all((topic, host)):
+ if topic == 'compute':
+ result = self.db.service_get_by_compute_host(context, host)
+ # FIXME(comstud) Potentially remove this on bump to v2.0
+ result = [result]
+ else:
+ result = self.db.service_get_by_host_and_topic(context,
+ host, topic)
+ elif all((host, binary)):
+ result = self.db.service_get_by_args(context, host, binary)
+ elif topic:
+ result = self.db.service_get_all_by_topic(context, topic)
+ elif host:
+ result = self.db.service_get_all_by_host(context, host)
+
+ return jsonutils.to_primitive(result)
+
+ def action_event_start(self, context, values):
+ evt = self.db.action_event_start(context, values)
+ return jsonutils.to_primitive(evt)
+
+ def action_event_finish(self, context, values):
+ evt = self.db.action_event_finish(context, values)
+ return jsonutils.to_primitive(evt)
+
+ def service_create(self, context, values):
+ svc = self.db.service_create(context, values)
+ return jsonutils.to_primitive(svc)
+
+ @rpc_common.client_exceptions(exception.ServiceNotFound)
+ def service_destroy(self, context, service_id):
+ self.db.service_destroy(context, service_id)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
new file mode 100644
index 000000000..b7f760cf5
--- /dev/null
+++ b/nova/conductor/rpcapi.py
@@ -0,0 +1,299 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Client side of the conductor RPC API."""
+
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+import nova.openstack.common.rpc.proxy
+
+CONF = cfg.CONF
+
+
+class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
+ """Client side of the conductor RPC API
+
+ API version history:
+
+ 1.0 - Initial version.
+ 1.1 - Added migration_update
+ 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
+ 1.3 - Added aggregate_host_add and aggregate_host_delete
+ 1.4 - Added migration_get
+ 1.5 - Added bw_usage_update
+ 1.6 - Added get_backdoor_port()
+ 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
+ and aggregate_metadata_delete
+ 1.8 - Added security_group_get_by_instance and
+ security_group_rule_get_by_security_group
+ 1.9 - Added provider_fw_rule_get_all
+ 1.10 - Added agent_build_get_by_triple
+ 1.11 - Added aggregate_get
+ 1.12 - Added block_device_mapping_update_or_create
+ 1.13 - Added block_device_mapping_get_all_by_instance
+ 1.14 - Added block_device_mapping_destroy
+ 1.15 - Added instance_get_all_by_filters and
+ instance_get_all_hung_in_rebooting and
+ instance_get_active_by_window
+ Deprecated instance_get_all_by_host
+ 1.16 - Added instance_destroy
+ 1.17 - Added instance_info_cache_delete
+ 1.18 - Added instance_type_get
+ 1.19 - Added vol_get_usage_by_time and vol_usage_update
+ 1.20 - Added migration_get_unconfirmed_by_dest_compute
+ 1.21 - Added service_get_all_by
+ 1.22 - Added ping
+ 1.23 - Added instance_get_all
+ Un-Deprecate instance_get_all_by_host
+ 1.24 - Added instance_get
+ 1.25 - Added action_event_start and action_event_finish
+ 1.26 - Added instance_info_cache_update
+ 1.27 - Added service_create
+ 1.28 - Added binary arg to service_get_all_by
+ 1.29 - Added service_destroy
+ 1.30 - Added migration_create
+ """
+
+ BASE_RPC_API_VERSION = '1.0'
+
+ def __init__(self):
+ super(ConductorAPI, self).__init__(
+ topic=CONF.conductor.topic,
+ default_version=self.BASE_RPC_API_VERSION)
+
+ def ping(self, context, arg, timeout=None):
+ arg_p = jsonutils.to_primitive(arg)
+ msg = self.make_msg('ping', arg=arg_p)
+ return self.call(context, msg, version='1.22', timeout=timeout)
+
+ def instance_update(self, context, instance_uuid, updates):
+ updates_p = jsonutils.to_primitive(updates)
+ return self.call(context,
+ self.make_msg('instance_update',
+ instance_uuid=instance_uuid,
+ updates=updates_p))
+
+ def instance_get(self, context, instance_id):
+ msg = self.make_msg('instance_get',
+ instance_id=instance_id)
+ return self.call(context, msg, version='1.24')
+
+ def instance_get_by_uuid(self, context, instance_uuid):
+ msg = self.make_msg('instance_get_by_uuid',
+ instance_uuid=instance_uuid)
+ return self.call(context, msg, version='1.2')
+
+ def migration_get(self, context, migration_id):
+ msg = self.make_msg('migration_get', migration_id=migration_id)
+ return self.call(context, msg, version='1.4')
+
+ def migration_get_unconfirmed_by_dest_compute(self, context,
+ confirm_window,
+ dest_compute):
+ msg = self.make_msg('migration_get_unconfirmed_by_dest_compute',
+ confirm_window=confirm_window,
+ dest_compute=dest_compute)
+ return self.call(context, msg, version='1.20')
+
+ def migration_create(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('migration_create', instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.30')
+
+ def migration_update(self, context, migration, status):
+ migration_p = jsonutils.to_primitive(migration)
+ msg = self.make_msg('migration_update', migration=migration_p,
+ status=status)
+ return self.call(context, msg, version='1.1')
+
+ def aggregate_host_add(self, context, aggregate, host):
+ aggregate_p = jsonutils.to_primitive(aggregate)
+ msg = self.make_msg('aggregate_host_add', aggregate=aggregate_p,
+ host=host)
+ return self.call(context, msg, version='1.3')
+
+ def aggregate_host_delete(self, context, aggregate, host):
+ aggregate_p = jsonutils.to_primitive(aggregate)
+ msg = self.make_msg('aggregate_host_delete', aggregate=aggregate_p,
+ host=host)
+ return self.call(context, msg, version='1.3')
+
+ def aggregate_get(self, context, aggregate_id):
+ msg = self.make_msg('aggregate_get', aggregate_id=aggregate_id)
+ return self.call(context, msg, version='1.11')
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ msg = self.make_msg('aggregate_get_by_host', host=host, key=key)
+ return self.call(context, msg, version='1.7')
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ aggregate_p = jsonutils.to_primitive(aggregate)
+ msg = self.make_msg('aggregate_metadata_add', aggregate=aggregate_p,
+ metadata=metadata,
+ set_delete=set_delete)
+ return self.call(context, msg, version='1.7')
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ aggregate_p = jsonutils.to_primitive(aggregate)
+ msg = self.make_msg('aggregate_metadata_delete', aggregate=aggregate_p,
+ key=key)
+ return self.call(context, msg, version='1.7')
+
+ def bw_usage_update(self, context, uuid, mac, start_period,
+ bw_in=None, bw_out=None,
+ last_ctr_in=None, last_ctr_out=None,
+ last_refreshed=None):
+ msg = self.make_msg('bw_usage_update',
+ uuid=uuid, mac=mac, start_period=start_period,
+ bw_in=bw_in, bw_out=bw_out,
+ last_ctr_in=last_ctr_in, last_ctr_out=last_ctr_out,
+ last_refreshed=last_refreshed)
+ return self.call(context, msg, version='1.5')
+
+ def get_backdoor_port(self, context):
+ msg = self.make_msg('get_backdoor_port')
+ return self.call(context, msg, version='1.6')
+
+ def security_group_get_by_instance(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('security_group_get_by_instance',
+ instance=instance_p)
+ return self.call(context, msg, version='1.8')
+
+ def security_group_rule_get_by_security_group(self, context, secgroup):
+ secgroup_p = jsonutils.to_primitive(secgroup)
+ msg = self.make_msg('security_group_rule_get_by_security_group',
+ secgroup=secgroup_p)
+ return self.call(context, msg, version='1.8')
+
+ def provider_fw_rule_get_all(self, context):
+ msg = self.make_msg('provider_fw_rule_get_all')
+ return self.call(context, msg, version='1.9')
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ msg = self.make_msg('agent_build_get_by_triple',
+ hypervisor=hypervisor, os=os,
+ architecture=architecture)
+ return self.call(context, msg, version='1.10')
+
+ def block_device_mapping_update_or_create(self, context, values,
+ create=None):
+ msg = self.make_msg('block_device_mapping_update_or_create',
+ values=values, create=create)
+ return self.call(context, msg, version='1.12')
+
+ def block_device_mapping_get_all_by_instance(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('block_device_mapping_get_all_by_instance',
+ instance=instance_p)
+ return self.call(context, msg, version='1.13')
+
+ def block_device_mapping_destroy(self, context, bdms=None,
+ instance=None, volume_id=None,
+ device_name=None):
+ bdms_p = jsonutils.to_primitive(bdms)
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('block_device_mapping_destroy',
+ bdms=bdms_p,
+ instance=instance_p, volume_id=volume_id,
+ device_name=device_name)
+ return self.call(context, msg, version='1.14')
+
+ def instance_get_all_by_filters(self, context, filters, sort_key,
+ sort_dir):
+ msg = self.make_msg('instance_get_all_by_filters',
+ filters=filters, sort_key=sort_key,
+ sort_dir=sort_dir)
+ return self.call(context, msg, version='1.15')
+
+ def instance_get_all_hung_in_rebooting(self, context, timeout):
+ msg = self.make_msg('instance_get_all_hung_in_rebooting',
+ timeout=timeout)
+ return self.call(context, msg, version='1.15')
+
+ def instance_get_active_by_window(self, context, begin, end=None,
+ project_id=None, host=None):
+ msg = self.make_msg('instance_get_active_by_window',
+ begin=begin, end=end, project_id=project_id,
+ host=host)
+ return self.call(context, msg, version='1.15')
+
+ def instance_destroy(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('instance_destroy', instance=instance_p)
+ self.call(context, msg, version='1.16')
+
+ def instance_info_cache_delete(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('instance_info_cache_delete', instance=instance_p)
+ self.call(context, msg, version='1.17')
+
+ def instance_type_get(self, context, instance_type_id):
+ msg = self.make_msg('instance_type_get',
+ instance_type_id=instance_type_id)
+ return self.call(context, msg, version='1.18')
+
+ def vol_get_usage_by_time(self, context, start_time):
+ start_time_p = jsonutils.to_primitive(start_time)
+ msg = self.make_msg('vol_get_usage_by_time', start_time=start_time_p)
+ return self.call(context, msg, version='1.19')
+
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance, last_refreshed=None,
+ update_totals=False):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('vol_usage_update', vol_id=vol_id, rd_req=rd_req,
+ rd_bytes=rd_bytes, wr_req=wr_req,
+ wr_bytes=wr_bytes,
+ instance=instance_p, last_refreshed=last_refreshed,
+ update_totals=update_totals)
+ return self.call(context, msg, version='1.19')
+
+ def service_get_all_by(self, context, topic=None, host=None, binary=None):
+ msg = self.make_msg('service_get_all_by', topic=topic, host=host,
+ binary=binary)
+ return self.call(context, msg, version='1.28')
+
+ def instance_get_all(self, context):
+ msg = self.make_msg('instance_get_all')
+ return self.call(context, msg, version='1.23')
+
+ def instance_get_all_by_host(self, context, host):
+ msg = self.make_msg('instance_get_all_by_host', host=host)
+ return self.call(context, msg, version='1.23')
+
+ def action_event_start(self, context, values):
+ msg = self.make_msg('action_event_start', values=values)
+ return self.call(context, msg, version='1.25')
+
+ def action_event_finish(self, context, values):
+ msg = self.make_msg('action_event_finish', values=values)
+ return self.call(context, msg, version='1.25')
+
+ def instance_info_cache_update(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('instance_info_cache_update',
+ instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.26')
+
+ def service_create(self, context, values):
+ msg = self.make_msg('service_create', values=values)
+ return self.call(context, msg, version='1.27')
+
+ def service_destroy(self, context, service_id):
+ msg = self.make_msg('service_destroy', service_id=service_id)
+ return self.call(context, msg, version='1.29')
diff --git a/nova/config.py b/nova/config.py
index 608a3ee53..4095dba75 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -18,12 +18,11 @@
# under the License.
from nova.openstack.common import cfg
-
-CONF = cfg.CONF
+from nova.openstack.common import rpc
def parse_args(argv, default_config_files=None):
- CONF.disable_interspersed_args()
- return argv[:1] + CONF(argv[1:],
- project='nova',
- default_config_files=default_config_files)
+ rpc.set_defaults(control_exchange='nova')
+ cfg.CONF(argv[1:],
+ project='nova',
+ default_config_files=default_config_files)
diff --git a/nova/console/api.py b/nova/console/api.py
index 5fb506fe0..57c5cb0e3 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -18,14 +18,14 @@
"""Handles ConsoleProxy API requests."""
from nova.compute import rpcapi as compute_rpcapi
-from nova import config
from nova.console import rpcapi as console_rpcapi
from nova.db import base
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('console_topic', 'nova.console.rpcapi')
class API(base.Base):
@@ -68,3 +68,8 @@ class API(base.Base):
else:
instance = self.db.instance_get(context, instance_uuid)
return instance
+
+ def get_backdoor_port(self, context, host):
+ topic = self._get_console_topic(context, host)
+ rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
+ return rpcapi.get_backdoor_port(context, host)
diff --git a/nova/console/manager.py b/nova/console/manager.py
index a2f48494b..2045f824d 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -20,9 +20,7 @@
import socket
from nova.compute import rpcapi as compute_rpcapi
-from nova import config
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -42,7 +40,7 @@ console_manager_opts = [
help='Publicly visible name for this console host'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(console_manager_opts)
LOG = logging.getLogger(__name__)
@@ -54,7 +52,7 @@ class ConsoleProxyManager(manager.Manager):
"""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
@@ -67,7 +65,6 @@ class ConsoleProxyManager(manager.Manager):
def init_host(self):
self.driver.init_host()
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
@@ -95,7 +92,6 @@ class ConsoleProxyManager(manager.Manager):
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
@@ -134,3 +130,6 @@ class ConsoleProxyManager(manager.Manager):
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/console/rpcapi.py b/nova/console/rpcapi.py
index 06e282dce..5a91a81cb 100644
--- a/nova/console/rpcapi.py
+++ b/nova/console/rpcapi.py
@@ -18,11 +18,17 @@
Client side of the console RPC API.
"""
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
import nova.openstack.common.rpc.proxy
-CONF = config.CONF
+rpcapi_opts = [
+ cfg.StrOpt('console_topic',
+ default='console',
+ help='the topic console proxy nodes listen on'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -31,6 +37,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -54,3 +61,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def remove_console(self, ctxt, console_id):
self.cast(ctxt, self.make_msg('remove_console', console_id=console_id))
+
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index ae66d5f05..fcd733cf9 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -19,9 +19,7 @@
import base64
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
@@ -36,7 +34,7 @@ vmrc_opts = [
help="number of retries for retrieving VMRC information"),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(vmrc_opts)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index c40067daa..bb1818943 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -18,9 +18,7 @@
"""VMRC Console Manager."""
from nova.compute import rpcapi as compute_rpcapi
-from nova import config
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -30,7 +28,7 @@ from nova.virt.vmwareapi import driver as vmwareapi_conn
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
CONF.import_opt('console_driver', 'nova.console.manager')
CONF.import_opt('console_public_hostname', 'nova.console.manager')
@@ -51,7 +49,7 @@ class ConsoleVMRCManager(manager.Manager):
"""Get VIM session for the pool specified."""
vim_session = None
if pool['id'] not in self.sessions.keys():
- vim_session = vmwareapi_conn.VMWareAPISession(
+ vim_session = vmwareapi_conn.VMwareAPISession(
pool['address'],
pool['username'],
pool['password'],
@@ -77,7 +75,6 @@ class ConsoleVMRCManager(manager.Manager):
self.driver.setup_console(context, console)
return console
- @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
"""Adds a console for the instance.
@@ -107,7 +104,6 @@ class ConsoleVMRCManager(manager.Manager):
instance)
return console['id']
- @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
"""Removes a console entry."""
try:
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 12a7f3fd3..ce2eb5350 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -22,19 +22,18 @@ import signal
from Cheetah import Template
-from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
xvp_opts = [
cfg.StrOpt('console_xvp_conf_template',
- default='$pybasedir/nova/console/xvp.conf.template',
+ default=paths.basedir_def('nova/console/xvp.conf.template'),
help='XVP conf template'),
cfg.StrOpt('console_xvp_conf',
default='/etc/xvp.conf',
@@ -50,8 +49,9 @@ xvp_opts = [
help='port for XVP to multiplex VNC connections on'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xvp_opts)
+CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
@@ -114,9 +114,13 @@ class XVPConsoleProxy(object):
self._xvp_restart()
def _write_conf(self, config):
- LOG.debug(_('Re-wrote %s') % CONF.console_xvp_conf)
- with open(CONF.console_xvp_conf, 'w') as cfile:
- cfile.write(config)
+ try:
+ LOG.debug(_('Re-wrote %s') % CONF.console_xvp_conf)
+ with open(CONF.console_xvp_conf, 'w') as cfile:
+ cfile.write(config)
+ except IOError:
+ LOG.exception(_("Failed to write configuration file"))
+ raise
def _xvp_stop(self):
LOG.debug(_('Stopping xvp'))
@@ -194,4 +198,6 @@ class XVPConsoleProxy(object):
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp', flag, process_input=password)
+ if err:
+ raise exception.ProcessExecutionError(_("Failed to run xvp."))
return out.strip()
diff --git a/nova/consoleauth/__init__.py b/nova/consoleauth/__init__.py
index 11253ea91..c533043f3 100644
--- a/nova/consoleauth/__init__.py
+++ b/nova/consoleauth/__init__.py
@@ -18,8 +18,6 @@
"""Module to authenticate Consoles."""
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
@@ -27,5 +25,5 @@ consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='the topic console auth proxy nodes listen on')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(consoleauth_topic_opt)
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index c772101eb..2dfc72435 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -20,8 +20,7 @@
import time
-from nova import config
-from nova import flags
+from nova.common import memorycache
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
@@ -39,24 +38,18 @@ consoleauth_opts = [
help='Manager for console auth'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(consoleauth_opts)
class ConsoleAuthManager(manager.Manager):
"""Manages token based authentication."""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(*args, **kwargs)
-
- if CONF.memcached_servers:
- import memcache
- else:
- from nova.common import memorycache as memcache
- self.mc = memcache.Client(CONF.memcached_servers,
- debug=0)
+ self.mc = memorycache.get_client()
def authorize_console(self, context, token, console_type, host, port,
internal_access_path):
@@ -67,12 +60,15 @@ class ConsoleAuthManager(manager.Manager):
'internal_access_path': internal_access_path,
'last_activity_at': time.time()}
data = jsonutils.dumps(token_dict)
- self.mc.set(token, data, CONF.console_token_ttl)
+ self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl)
LOG.audit(_("Received Token: %(token)s, %(token_dict)s)"), locals())
def check_token(self, context, token):
- token_str = self.mc.get(token)
+ token_str = self.mc.get(token.encode('UTF-8'))
token_valid = (token_str is not None)
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
if token_valid:
return jsonutils.loads(token_str)
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index 51d28cb04..64b915ec3 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -18,11 +18,10 @@
Client side of the consoleauth RPC API.
"""
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
import nova.openstack.common.rpc.proxy
-CONF = config.CONF
+CONF = cfg.CONF
class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -31,6 +30,7 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -60,3 +60,7 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def check_token(self, ctxt, token):
return self.call(ctxt, self.make_msg('check_token', token=token))
+
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/context.py b/nova/context.py
index 74f7a3c23..1a566cb5a 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -20,19 +20,19 @@
"""RequestContext: context for requests that persist through all of nova."""
import copy
+import uuid
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import policy
-from nova import utils
LOG = logging.getLogger(__name__)
def generate_request_id():
- return 'req-' + str(utils.gen_uuid())
+ return 'req-' + str(uuid.uuid4())
class RequestContext(object):
@@ -124,7 +124,9 @@ class RequestContext(object):
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
- 'instance_lock_checked': self.instance_lock_checked}
+ 'instance_lock_checked': self.instance_lock_checked,
+ 'tenant': self.tenant,
+ 'user': self.user}
@classmethod
def from_dict(cls, values):
@@ -143,6 +145,19 @@ class RequestContext(object):
return context
+ # NOTE(sirp): the openstack/common version of RequestContext uses
+ # tenant/user whereas the Nova version uses project_id/user_id. We need
+ # this shim in order to use context-aware code from openstack/common, like
+ # logging, until we make the switch to using openstack/common's version of
+ # RequestContext.
+ @property
+ def tenant(self):
+ return self.project_id
+
+ @property
+ def user(self):
+ return self.user_id
+
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
diff --git a/nova/crypto.py b/nova/crypto.py
index ef7b40419..ff76a54d0 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -28,15 +28,14 @@ import hashlib
import os
import string
-from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova import paths
from nova import utils
@@ -53,10 +52,10 @@ crypto_opts = [
default='crl.pem',
help=_('Filename of root Certificate Revocation List')),
cfg.StrOpt('keys_path',
- default='$state_path/keys',
+ default=paths.state_path_def('keys'),
help=_('Where we keep our keys')),
cfg.StrOpt('ca_path',
- default='$state_path/CA',
+ default=paths.state_path_def('CA'),
help=_('Where we keep our root CA')),
cfg.BoolOpt('use_project_ca',
default=False,
@@ -73,7 +72,7 @@ crypto_opts = [
'project, timestamp')),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(crypto_opts)
@@ -100,7 +99,7 @@ def fetch_ca(project_id=None):
project_id = None
ca_file_path = ca_path(project_id)
if not os.path.exists(ca_file_path):
- raise exception.CryptoCAFileNotFound(project_id=project_id)
+ raise exception.CryptoCAFileNotFound(project=project_id)
with open(ca_file_path, 'r') as cafile:
return cafile.read()
@@ -109,9 +108,8 @@ def ensure_ca_filesystem():
"""Ensure the CA filesystem exists."""
ca_dir = ca_folder()
if not os.path.exists(ca_path()):
- genrootca_sh_path = os.path.join(os.path.dirname(__file__),
- 'CA',
- 'genrootca.sh')
+ genrootca_sh_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), 'CA', 'genrootca.sh'))
start = os.getcwd()
fileutils.ensure_tree(ca_dir)
@@ -162,7 +160,7 @@ def fetch_crl(project_id):
project_id = None
crl_file_path = crl_path(project_id)
if not os.path.exists(crl_file_path):
- raise exception.CryptoCRLFileNotFound(project_id)
+ raise exception.CryptoCRLFileNotFound(project=project_id)
with open(crl_file_path, 'r') as crlfile:
return crlfile.read()
@@ -252,9 +250,8 @@ def generate_x509_cert(user_id, project_id, bits=1024):
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
- geninter_sh_path = os.path.join(os.path.dirname(__file__),
- 'CA',
- 'geninter.sh')
+ geninter_sh_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), 'CA', 'geninter.sh'))
start = os.getcwd()
os.chdir(ca_folder())
utils.execute('sh', geninter_sh_path, project_id,
@@ -296,8 +293,12 @@ def _sign_csr(csr_text, ca_folder):
inbound = os.path.join(tmpdir, 'inbound.csr')
outbound = os.path.join(tmpdir, 'outbound.csr')
- with open(inbound, 'w') as csrfile:
- csrfile.write(csr_text)
+ try:
+ with open(inbound, 'w') as csrfile:
+ csrfile.write(csr_text)
+ except IOError:
+ LOG.exception(_('Failed to write inbound.csr'))
+ raise
LOG.debug(_('Flags path: %s'), ca_folder)
start = os.getcwd()
diff --git a/nova/db/api.py b/nova/db/api.py
index 757f101b3..ecfcfab15 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -43,9 +43,10 @@ these objects be simple dictionaries.
"""
+from nova.cells import rpcapi as cells_rpcapi
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
from nova import utils
@@ -59,19 +60,17 @@ db_opts = [
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
- cfg.StrOpt('volume_name_template',
- default='volume-%s',
- help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(db_opts)
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.api')
+LOG = logging.getLogger(__name__)
class NoMoreNetworks(exception.NovaException):
@@ -80,7 +79,7 @@ class NoMoreNetworks(exception.NovaException):
class NoMoreTargets(exception.NovaException):
- """No more available targets"""
+ """No more available targets."""
pass
@@ -133,6 +132,15 @@ def service_get_all(context, disabled=None):
return IMPL.service_get_all(context, disabled)
+def service_does_host_exist(context, host_name, include_disabled=False):
+ """Returns True if 'host_name' is found in the services table, False
+ otherwise.
+ :param host_name: the name of the host we want to check if it exists
+ :param include_disabled: set to True to include hosts from disabled
+ services"""
+ return IMPL.service_does_host_exist(context, host_name, include_disabled)
+
+
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
@@ -143,9 +151,12 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
-def service_get_all_compute_by_host(context, host):
- """Get all compute services for a given host."""
- return IMPL.service_get_all_compute_by_host(context, host)
+def service_get_by_compute_host(context, host):
+ """Get the service entry for a given compute host.
+
+ Returns the service entry joined with the compute_node entry.
+ """
+ return IMPL.service_get_by_compute_host(context, host)
def service_get_all_compute_sorted(context):
@@ -157,15 +168,6 @@ def service_get_all_compute_sorted(context):
return IMPL.service_get_all_compute_sorted(context)
-def service_get_all_volume_sorted(context):
- """Get all volume services sorted by volume count.
-
- :returns: a list of (Service, volume_count) tuples.
-
- """
- return IMPL.service_get_all_volume_sorted(context)
-
-
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
@@ -256,7 +258,7 @@ def floating_ip_get(context, id):
def floating_ip_get_pools(context):
- """Returns a list of floating ip pools"""
+ """Returns a list of floating ip pools."""
return IMPL.floating_ip_get_pools(context)
@@ -303,7 +305,8 @@ def floating_ip_destroy(context, address):
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
- :returns: the address of the existing fixed ip.
+ :returns: the address of the previous fixed ip or None
+ if the floating ip was not associated with a fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
@@ -311,7 +314,12 @@ def floating_ip_disassociate(context, address):
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
- """Associate a floating ip to a fixed_ip by address."""
+ """Associate a floating ip to a fixed_ip by address.
+
+ :returns: the address of the new fixed ip (fixed_address) or None
+ if the ip was already associated with the fixed ip.
+ """
+
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
@@ -339,12 +347,12 @@ def floating_ip_get_by_address(context, address):
def floating_ip_get_by_fixed_address(context, fixed_address):
- """Get a floating ips by fixed address"""
+ """Get floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
- """Get a floating ips by fixed address"""
+ """Get floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
@@ -354,7 +362,7 @@ def floating_ip_update(context, address, values):
def floating_ip_set_auto_assigned(context, address):
- """Set auto_assigned flag to floating ip"""
+ """Set auto_assigned flag to floating ip."""
return IMPL.floating_ip_set_auto_assigned(context, address)
@@ -364,17 +372,17 @@ def dnsdomain_list(context):
def dnsdomain_register_for_zone(context, fqdomain, zone):
- """Associated a DNS domain with an availability zone"""
+ """Associate a DNS domain with an availability zone."""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
- """Associated a DNS domain with a project id"""
+ """Associate a DNS domain with a project id."""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
- """Purge associations for the specified DNS zone"""
+ """Purge associations for the specified DNS zone."""
return IMPL.dnsdomain_unregister(context, fqdomain)
@@ -417,6 +425,13 @@ def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
confirm_window, dest_compute)
+def migration_get_in_progress_by_host_and_node(context, host, node):
+ """Finds all migrations for the given host + node that are not yet
+ confirmed or reverted.
+ """
+ return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
+
+
####################
@@ -477,6 +492,11 @@ def fixed_ip_get_by_address(context, address):
return IMPL.fixed_ip_get_by_address(context, address)
+def fixed_ip_get_by_address_detailed(context, address):
+ """Get detailed fixed ip info by address or raise if it does not exist."""
+ return IMPL.fixed_ip_get_by_address_detailed(context, address)
+
+
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
@@ -492,11 +512,6 @@ def fixed_ips_by_virtual_interface(context, vif_id):
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
-def fixed_ip_get_network(context, address):
- """Get a network for a fixed ip by address."""
- return IMPL.fixed_ip_get_network(context, address)
-
-
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
@@ -510,7 +525,7 @@ def virtual_interface_create(context, values):
def virtual_interface_get(context, vif_id):
- """Gets a virtual interface from the table,"""
+ """Gets a virtual interface from the table."""
return IMPL.virtual_interface_get(context, vif_id)
@@ -548,7 +563,7 @@ def virtual_interface_delete_by_instance(context, instance_id):
def virtual_interface_get_all(context):
- """Gets all virtual interfaces from the table"""
+ """Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
@@ -566,9 +581,16 @@ def instance_data_get_for_project(context, project_id, session=None):
session=session)
-def instance_destroy(context, instance_uuid, constraint=None):
+def instance_destroy(context, instance_uuid, constraint=None,
+ update_cells=True):
"""Destroy the instance or raise if it does not exist."""
- return IMPL.instance_destroy(context, instance_uuid, constraint)
+ rv = IMPL.instance_destroy(context, instance_uuid, constraint)
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance destroy"))
+ return rv
def instance_get_by_uuid(context, uuid):
@@ -626,6 +648,11 @@ def instance_get_all_by_host(context, host):
return IMPL.instance_get_all_by_host(context, host)
+def instance_get_all_by_host_and_node(context, host, node):
+ """Get all instances belonging to a node."""
+ return IMPL.instance_get_all_by_host_and_node(context, host, node)
+
+
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
@@ -642,7 +669,7 @@ def instance_get_floating_address(context, instance_id):
def instance_floating_address_get_all(context, instance_uuid):
- """Get all floating ip addresses of an instance"""
+ """Get all floating ip addresses of an instance."""
return IMPL.instance_floating_address_get_all(context, instance_uuid)
@@ -660,13 +687,19 @@ def instance_test_and_set(context, instance_uuid, attr, ok_states,
ok_states, new_state)
-def instance_update(context, instance_uuid, values):
+def instance_update(context, instance_uuid, values, update_cells=True):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
- return IMPL.instance_update(context, instance_uuid, values)
+ rv = IMPL.instance_update(context, instance_uuid, values)
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance update"))
+ return rv
def instance_update_and_get_original(context, instance_uuid, values):
@@ -682,8 +715,12 @@ def instance_update_and_get_original(context, instance_uuid, values):
Raises NotFound if instance does not exist.
"""
- return IMPL.instance_update_and_get_original(context, instance_uuid,
- values)
+ rv = IMPL.instance_update_and_get_original(context, instance_uuid, values)
+ try:
+ cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance update"))
+ return rv
def instance_add_security_group(context, instance_id, security_group_id):
@@ -701,15 +738,6 @@ def instance_remove_security_group(context, instance_id, security_group_id):
###################
-def instance_info_cache_create(context, values):
- """Create a new instance cache record in the table.
-
- :param context: = request context object
- :param values: = dict containing column values
- """
- return IMPL.instance_info_cache_create(context, values)
-
-
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
@@ -718,13 +746,21 @@ def instance_info_cache_get(context, instance_uuid):
return IMPL.instance_info_cache_get(context, instance_uuid)
-def instance_info_cache_update(context, instance_uuid, values):
+def instance_info_cache_update(context, instance_uuid, values,
+ update_cells=True):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
- return IMPL.instance_info_cache_update(context, instance_uuid, values)
+ rv = IMPL.instance_info_cache_update(context, instance_uuid, values)
+ try:
+ cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(context,
+ rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance info cache "
+ "update"))
+ return rv
def instance_info_cache_delete(context, instance_uuid):
@@ -748,11 +784,6 @@ def key_pair_destroy(context, user_id, name):
return IMPL.key_pair_destroy(context, user_id, name)
-def key_pair_destroy_all_by_user(context, user_id):
- """Destroy all key_pairs by user."""
- return IMPL.key_pair_destroy_all_by_user(context, user_id)
-
-
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
@@ -776,11 +807,6 @@ def network_associate(context, project_id, network_id=None, force=False):
return IMPL.network_associate(context, project_id, network_id, force)
-def network_count(context):
- """Return the number of networks."""
- return IMPL.network_count(context)
-
-
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
@@ -805,14 +831,12 @@ def network_delete_safe(context, network_id):
return IMPL.network_delete_safe(context, network_id)
-def network_create_fixed_ips(context, network_id, num_vpn_clients):
- """Create the ips for the network, reserving sepecified ips."""
- return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
-
-
-def network_disassociate(context, network_id):
- """Disassociate the network from project or raise if it does not exist."""
- return IMPL.network_disassociate(context, network_id)
+def network_disassociate(context, network_id, disassociate_host=True,
+ disassociate_project=True):
+ """Disassociate the network from project or host and raise if it does
+ not exist."""
+ return IMPL.network_disassociate(context, network_id, disassociate_host,
+ disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
@@ -834,6 +858,10 @@ def network_get_all_by_uuids(context, network_uuids,
# pylint: disable=C0103
+def network_in_use_on_host(context, network_id, host=None):
+ """Indicates if a network is currently in use on host."""
+ return IMPL.network_in_use_on_host(context, network_id, host)
+
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
@@ -851,7 +879,7 @@ def network_get_by_uuid(context, uuid):
def network_get_by_cidr(context, cidr):
- """Get a network by cidr or raise if it does not exist"""
+ """Get a network by cidr or raise if it does not exist."""
return IMPL.network_get_by_cidr(context, cidr)
@@ -870,16 +898,6 @@ def network_get_all_by_host(context, host):
return IMPL.network_get_all_by_host(context, host)
-def network_get_index(context, network_id):
- """Get non-conflicting index for network."""
- return IMPL.network_get_index(context, network_id)
-
-
-def network_set_cidr(context, network_id, cidr):
- """Set the Classless Inner Domain Routing for the network."""
- return IMPL.network_set_cidr(context, network_id, cidr)
-
-
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
@@ -936,11 +954,6 @@ def quota_update(context, project_id, resource, limit):
return IMPL.quota_update(context, project_id, resource, limit)
-def quota_destroy(context, project_id, resource):
- """Destroy the quota or raise if it does not exist."""
- return IMPL.quota_destroy(context, project_id, resource)
-
-
###################
@@ -964,26 +977,9 @@ def quota_class_update(context, class_name, resource, limit):
return IMPL.quota_class_update(context, class_name, resource, limit)
-def quota_class_destroy(context, class_name, resource):
- """Destroy the quota class or raise if it does not exist."""
- return IMPL.quota_class_destroy(context, class_name, resource)
-
-
-def quota_class_destroy_all_by_name(context, class_name):
- """Destroy all quotas associated with a given quota class."""
- return IMPL.quota_class_destroy_all_by_name(context, class_name)
-
-
###################
-def quota_usage_create(context, project_id, resource, in_use, reserved,
- until_refresh):
- """Create a quota usage for the given project and resource."""
- return IMPL.quota_usage_create(context, project_id, resource,
- in_use, reserved, until_refresh)
-
-
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
@@ -999,11 +995,6 @@ def quota_usage_update(context, project_id, resource, **kwargs):
return IMPL.quota_usage_update(context, project_id, resource, **kwargs)
-def quota_usage_destroy(context, project_id, resource):
- """Destroy the quota usage or raise if it does not exist."""
- return IMPL.quota_usage_destroy(context, project_id, resource)
-
-
###################
@@ -1019,11 +1010,6 @@ def reservation_get(context, uuid):
return IMPL.reservation_get(context, uuid)
-def reservation_get_all_by_project(context, project_id):
- """Retrieve all reservations associated with a given project."""
- return IMPL.reservation_get_all_by_project(context, project_id)
-
-
def reservation_destroy(context, uuid):
"""Destroy the reservation or raise if it does not exist."""
return IMPL.reservation_destroy(context, uuid)
@@ -1033,20 +1019,22 @@ def reservation_destroy(context, uuid):
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age)
+ until_refresh, max_age, project_id=project_id)
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
- return IMPL.reservation_commit(context, reservations)
+ return IMPL.reservation_commit(context, reservations,
+ project_id=project_id)
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
- return IMPL.reservation_rollback(context, reservations)
+ return IMPL.reservation_rollback(context, reservations,
+ project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
@@ -1062,81 +1050,11 @@ def reservation_expire(context):
###################
-def volume_allocate_iscsi_target(context, volume_id, host):
- """Atomically allocate a free iscsi_target from the pool."""
- return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
-
-
-def volume_attached(context, volume_id, instance_id, mountpoint):
- """Ensure that a volume is set as attached."""
- return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
-
-
-def volume_create(context, values):
- """Create a volume from the values dictionary."""
- return IMPL.volume_create(context, values)
-
-
-def volume_data_get_for_project(context, project_id, session=None):
- """Get (volume_count, gigabytes) for project."""
- return IMPL.volume_data_get_for_project(context, project_id,
- session=session)
-
-
-def volume_destroy(context, volume_id):
- """Destroy the volume or raise if it does not exist."""
- return IMPL.volume_destroy(context, volume_id)
-
-
-def volume_detached(context, volume_id):
- """Ensure that a volume is set as detached."""
- return IMPL.volume_detached(context, volume_id)
-
-
-def volume_get(context, volume_id):
- """Get a volume or raise if it does not exist."""
- return IMPL.volume_get(context, volume_id)
-
-
-def volume_get_all(context):
- """Get all volumes."""
- return IMPL.volume_get_all(context)
-
-
-def volume_get_all_by_host(context, host):
- """Get all volumes belonging to a host."""
- return IMPL.volume_get_all_by_host(context, host)
-
-
-def volume_get_all_by_instance_uuid(context, instance_uuid):
- """Get all volumes belonging to an instance."""
- return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
-
-
-def volume_get_all_by_project(context, project_id):
- """Get all volumes belonging to a project."""
- return IMPL.volume_get_all_by_project(context, project_id)
-
-
-def volume_get_by_ec2_id(context, ec2_id):
- """Get a volume by ec2 id."""
- return IMPL.volume_get_by_ec2_id(context, ec2_id)
-
-
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
-def volume_update(context, volume_id, values):
- """Set the given properties on a volume and update it.
-
- Raises NotFound if volume does not exist.
-
- """
- return IMPL.volume_update(context, volume_id, values)
-
-
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
@@ -1164,55 +1082,13 @@ def ec2_snapshot_create(context, snapshot_id, forced_id=None):
####################
-def snapshot_create(context, values):
- """Create a snapshot from the values dictionary."""
- return IMPL.snapshot_create(context, values)
-
-
-def snapshot_destroy(context, snapshot_id):
- """Destroy the snapshot or raise if it does not exist."""
- return IMPL.snapshot_destroy(context, snapshot_id)
-
-
-def snapshot_get(context, snapshot_id):
- """Get a snapshot or raise if it does not exist."""
- return IMPL.snapshot_get(context, snapshot_id)
-
-
-def snapshot_get_all(context):
- """Get all snapshots."""
- return IMPL.snapshot_get_all(context)
-
-
-def snapshot_get_all_by_project(context, project_id):
- """Get all snapshots belonging to a project."""
- return IMPL.snapshot_get_all_by_project(context, project_id)
-
-
-def snapshot_get_all_for_volume(context, volume_id):
- """Get all snapshots for a volume."""
- return IMPL.snapshot_get_all_for_volume(context, volume_id)
-
-
-def snapshot_update(context, snapshot_id, values):
- """Set the given properties on a snapshot and update it.
-
- Raises NotFound if snapshot does not exist.
-
- """
- return IMPL.snapshot_update(context, snapshot_id, values)
-
-
-####################
-
-
def block_device_mapping_create(context, values):
- """Create an entry of block device mapping"""
+ """Create an entry of block device mapping."""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
- """Update an entry of block device mapping"""
+ """Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values)
@@ -1223,7 +1099,7 @@ def block_device_mapping_update_or_create(context, values):
def block_device_mapping_get_all_by_instance(context, instance_uuid):
- """Get all block device mapping belonging to an instance"""
+ """Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
@@ -1479,6 +1355,34 @@ def instance_type_access_remove(context, flavor_id, project_id):
####################
+def cell_create(context, values):
+ """Create a new child Cell entry."""
+ return IMPL.cell_create(context, values)
+
+
+def cell_update(context, cell_name, values):
+ """Update a child Cell entry."""
+ return IMPL.cell_update(context, cell_name, values)
+
+
+def cell_delete(context, cell_name):
+ """Delete a child Cell."""
+ return IMPL.cell_delete(context, cell_name)
+
+
+def cell_get(context, cell_name):
+ """Get a specific child Cell."""
+ return IMPL.cell_get(context, cell_name)
+
+
+def cell_get_all(context):
+ """Get all child Cells."""
+ return IMPL.cell_get_all(context)
+
+
+####################
+
+
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
@@ -1492,7 +1396,7 @@ def instance_metadata_delete(context, instance_uuid, key):
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
- metadata, delete)
+ metadata, delete)
####################
@@ -1503,11 +1407,6 @@ def instance_system_metadata_get(context, instance_uuid):
return IMPL.instance_system_metadata_get(context, instance_uuid)
-def instance_system_metadata_delete(context, instance_uuid, key):
- """Delete the given system metadata item."""
- IMPL.instance_system_metadata_delete(context, instance_uuid, key)
-
-
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
@@ -1528,9 +1427,9 @@ def agent_build_get_by_triple(context, hypervisor, os, architecture):
architecture)
-def agent_build_get_all(context):
+def agent_build_get_all(context, hypervisor=None):
"""Get all agent builds."""
- return IMPL.agent_build_get_all(context)
+ return IMPL.agent_build_get_all(context, hypervisor)
def agent_build_destroy(context, agent_update_id):
@@ -1557,12 +1456,21 @@ def bw_usage_get_by_uuids(context, uuids, start_period):
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
- last_ctr_in, last_ctr_out, last_refreshed=None):
+ last_ctr_in, last_ctr_out, last_refreshed=None,
+ update_cells=True):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
- return IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
+ rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
+ uuid, mac, start_period, bw_in, bw_out,
+ last_ctr_in, last_ctr_out, last_refreshed)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of bw_usage update"))
+ return rv
####################
@@ -1586,195 +1494,45 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
-##################
-
-
-def volume_metadata_get(context, volume_id):
- """Get all metadata for a volume."""
- return IMPL.volume_metadata_get(context, volume_id)
-
-
-def volume_metadata_delete(context, volume_id, key):
- """Delete the given metadata item."""
- IMPL.volume_metadata_delete(context, volume_id, key)
-
-
-def volume_metadata_update(context, volume_id, metadata, delete):
- """Update metadata if it exists, otherwise create it."""
- IMPL.volume_metadata_update(context, volume_id, metadata, delete)
-
-
-##################
-
-
-def volume_type_create(context, values):
- """Create a new volume type."""
- return IMPL.volume_type_create(context, values)
-
-
-def volume_type_get_all(context, inactive=False):
- """Get all volume types."""
- return IMPL.volume_type_get_all(context, inactive)
-
-
-def volume_type_get(context, id):
- """Get volume type by id."""
- return IMPL.volume_type_get(context, id)
-
-
-def volume_type_get_by_name(context, name):
- """Get volume type by name."""
- return IMPL.volume_type_get_by_name(context, name)
-
-
-def volume_type_destroy(context, name):
- """Delete a volume type."""
- return IMPL.volume_type_destroy(context, name)
-
-
-def volume_get_active_by_window(context, begin, end=None, project_id=None):
- """Get all the volumes inside the window.
-
- Specifying a project_id will filter for a certain project."""
- return IMPL.volume_get_active_by_window(context, begin, end, project_id)
-
-
-####################
-
-
-def volume_type_extra_specs_get(context, volume_type_id):
- """Get all extra specs for a volume type."""
- return IMPL.volume_type_extra_specs_get(context, volume_type_id)
+###################
-def volume_type_extra_specs_delete(context, volume_type_id, key):
- """Delete the given extra specs item."""
- IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
+def vol_get_usage_by_time(context, begin):
+ """Return volumes usage that have been updated after a specified time."""
+ return IMPL.vol_get_usage_by_time(context, begin)
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs):
- """Create or update volume type extra specs. This adds or modifies the
- key/value pairs specified in the extra specs dict argument"""
- IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs)
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+ instance_id, last_refreshed=None, update_totals=False):
+ """Update cached volume usage for a volume
+ Creates new record if needed."""
+ return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
+ wr_bytes, instance_id,
+ last_refreshed=last_refreshed,
+ update_totals=update_totals)
###################
def s3_image_get(context, image_id):
- """Find local s3 image represented by the provided id"""
+ """Find local s3 image represented by the provided id."""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
- """Find local s3 image represented by the provided uuid"""
+ """Find local s3 image represented by the provided uuid."""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
- """Create local s3 image represented by provided uuid"""
+ """Create local s3 image represented by provided uuid."""
return IMPL.s3_image_create(context, image_uuid)
####################
-def sm_backend_conf_create(context, values):
- """Create a new SM Backend Config entry."""
- return IMPL.sm_backend_conf_create(context, values)
-
-
-def sm_backend_conf_update(context, sm_backend_conf_id, values):
- """Update a SM Backend Config entry."""
- return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
-
-
-def sm_backend_conf_delete(context, sm_backend_conf_id):
- """Delete a SM Backend Config."""
- return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
-
-
-def sm_backend_conf_get(context, sm_backend_conf_id):
- """Get a specific SM Backend Config."""
- return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
-
-
-def sm_backend_conf_get_by_sr(context, sr_uuid):
- """Get a specific SM Backend Config."""
- return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
-
-
-def sm_backend_conf_get_all(context):
- """Get all SM Backend Configs."""
- return IMPL.sm_backend_conf_get_all(context)
-
-
-####################
-
-
-def sm_flavor_create(context, values):
- """Create a new SM Flavor entry."""
- return IMPL.sm_flavor_create(context, values)
-
-
-def sm_flavor_update(context, sm_flavor_id, values):
- """Update a SM Flavor entry."""
- return IMPL.sm_flavor_update(context, sm_flavor_id, values)
-
-
-def sm_flavor_delete(context, sm_flavor_id):
- """Delete a SM Flavor."""
- return IMPL.sm_flavor_delete(context, sm_flavor_id)
-
-
-def sm_flavor_get(context, sm_flavor_id):
- """Get a specific SM Flavor."""
- return IMPL.sm_flavor_get(context, sm_flavor_id)
-
-
-def sm_flavor_get_all(context):
- """Get all SM Flavors."""
- return IMPL.sm_flavor_get_all(context)
-
-
-def sm_flavor_get_by_label(context, sm_flavor_label):
- """Get a specific SM Flavor given label."""
- return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
-
-
-####################
-
-
-def sm_volume_create(context, values):
- """Create a new child Zone entry."""
- return IMPL.sm_volume_create(context, values)
-
-
-def sm_volume_update(context, volume_id, values):
- """Update a child Zone entry."""
- return IMPL.sm_volume_update(context, values)
-
-
-def sm_volume_delete(context, volume_id):
- """Delete a child Zone."""
- return IMPL.sm_volume_delete(context, volume_id)
-
-
-def sm_volume_get(context, volume_id):
- """Get a specific child Zone."""
- return IMPL.sm_volume_get(context, volume_id)
-
-
-def sm_volume_get_all(context):
- """Get all child Zones."""
- return IMPL.sm_volume_get_all(context)
-
-
-####################
-
-
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
@@ -1786,7 +1544,7 @@ def aggregate_get(context, aggregate_id):
def aggregate_get_by_host(context, host, key=None):
- """Get a list of aggregates that host belongs to"""
+ """Get a list of aggregates that host belongs to."""
return IMPL.aggregate_get_by_host(context, host, key)
@@ -1799,6 +1557,16 @@ def aggregate_metadata_get_by_host(context, host, key=None):
return IMPL.aggregate_metadata_get_by_host(context, host, key)
+def aggregate_host_get_by_metadata_key(context, key):
+ """Get hosts with a specific metadata key metadata for all aggregates.
+
+ Returns a dictionary where each key is a hostname and each value is a set
+ of the key values
+ return value: {machine: set( az1, az2 )}
+ """
+ return IMPL.aggregate_host_get_by_metadata_key(context, key)
+
+
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates. If values contains a metadata
key, it updates the aggregate metadata too."""
@@ -1848,9 +1616,15 @@ def aggregate_host_delete(context, aggregate_id, host):
####################
-def instance_fault_create(context, values):
+def instance_fault_create(context, values, update_cells=True):
"""Create a new Instance Fault."""
- return IMPL.instance_fault_create(context, values)
+ rv = IMPL.instance_fault_create(context, values)
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().instance_fault_create_at_top(context, rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance fault"))
+ return rv
def instance_fault_get_by_instance_uuids(context, instance_uuids):
@@ -1861,19 +1635,60 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
####################
+def action_start(context, values):
+ """Start an action for an instance."""
+ return IMPL.action_start(context, values)
+
+
+def action_finish(context, values):
+ """Finish an action for an instance."""
+ return IMPL.action_finish(context, values)
+
+
+def actions_get(context, uuid):
+ """Get all instance actions for the provided instance."""
+ return IMPL.actions_get(context, uuid)
+
+
+def action_get_by_id(context, uuid, action_id):
+ """Get the action by id and given instance."""
+ return IMPL.action_get_by_id(context, uuid, action_id)
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action."""
+ return IMPL.action_event_start(context, values)
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action."""
+ return IMPL.action_event_finish(context, values)
+
+
+def action_events_get(context, action_id):
+ return IMPL.action_events_get(context, action_id)
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ return IMPL.action_event_get_by_id(context, action_id, event_id)
+
+
+####################
+
+
def get_ec2_instance_id_by_uuid(context, instance_id):
- """Get ec2 id through uuid from instance_id_mappings table"""
+ """Get ec2 id through uuid from instance_id_mappings table."""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
- """Get uuid through ec2 id from instance_id_mappings table"""
+ """Get uuid through ec2 id from instance_id_mappings table."""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
-def ec2_instance_create(context, instance_ec2_id):
- """Create the ec2 id to instance uuid mapping on demand"""
- return IMPL.ec2_instance_create(context, instance_ec2_id)
+def ec2_instance_create(context, instance_uuid, id=None):
+ """Create the ec2 id to instance uuid mapping on demand."""
+ return IMPL.ec2_instance_create(context, instance_uuid, id)
####################
@@ -1886,7 +1701,7 @@ def task_log_end_task(context, task_name,
errors,
message=None,
session=None):
- """Mark a task as complete for a given host/time period"""
+ """Mark a task as complete for a given host/time period."""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
@@ -1903,7 +1718,7 @@ def task_log_begin_task(context, task_name,
task_items=None,
message=None,
session=None):
- """Mark a task as started for a given host/time period"""
+ """Mark a task as started for a given host/time period."""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
diff --git a/nova/db/base.py b/nova/db/base.py
index 0ec6ad6e2..2028e375e 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -18,17 +18,15 @@
"""Base class for classes that need modular database access."""
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
-
db_driver_opt = cfg.StrOpt('db_driver',
default='nova.db',
help='driver to use for database access')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(db_driver_opt)
+CONF = cfg.CONF
+CONF.register_opt(db_driver_opt)
class Base(object):
@@ -36,5 +34,5 @@ class Base(object):
def __init__(self, db_driver=None):
if not db_driver:
- db_driver = FLAGS.db_driver
+ db_driver = CONF.db_driver
self.db = importutils.import_module(db_driver) # pylint: disable=C0103
diff --git a/nova/db/migration.py b/nova/db/migration.py
index 76b70e14d..5ffa7cdfb 100644
--- a/nova/db/migration.py
+++ b/nova/db/migration.py
@@ -24,7 +24,7 @@ from nova import utils
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.migration')
-INIT_VERSION = 81
+INIT_VERSION = 132
def db_sync(version=None):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2f33eda0c..038a47ca1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,6 +23,7 @@ import collections
import copy
import datetime
import functools
+import uuid
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
@@ -31,24 +32,33 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import func
from nova import block_device
from nova.common.sqlalchemyutils import paginate_query
+from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
-from nova import utils
+db_opts = [
+ cfg.StrOpt('osapi_compute_unique_server_name_scope',
+ default='',
+ help='When set, compute API will consider duplicate hostnames '
+ 'invalid within the specified scope, regardless of case. '
+ 'Should be empty, "project" or "global".'),
+]
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
+CONF.import_opt('sql_connection', 'nova.db.sqlalchemy.session')
LOG = logging.getLogger(__name__)
@@ -139,20 +149,6 @@ def require_instance_exists_using_uuid(f):
return wrapper
-def require_volume_exists(f):
- """Decorator to require the specified volume to exist.
-
- Requires the wrapped function to use context and volume_id as
- their first two arguments.
- """
-
- def wrapper(context, volume_id, *args, **kwargs):
- db.volume_get(context, volume_id)
- return f(context, volume_id, *args, **kwargs)
- wrapper.__name__ = f.__name__
- return wrapper
-
-
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
@@ -310,12 +306,12 @@ def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
- service_ref.delete(session=session)
+ service_ref.soft_delete(session=session)
- if (service_ref.topic == FLAGS.compute_topic and
+ if (service_ref.topic == CONF.compute_topic and
service_ref.compute_node):
for c in service_ref.compute_node:
- c.delete(session=session)
+ c.soft_delete(session=session)
@require_admin_context
@@ -365,12 +361,12 @@ def service_get_all_by_host(context, host):
@require_admin_context
-def service_get_all_compute_by_host(context, host):
+def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
- filter_by(topic=FLAGS.compute_topic).\
- all()
+ filter_by(topic=CONF.compute_topic).\
+ first()
if not result:
raise exception.ComputeHostNotFound(host=host)
@@ -402,7 +398,7 @@ def service_get_all_compute_sorted(context):
# (SELECT host, SUM(instances.vcpus) AS instance_cores
# FROM instances GROUP BY host) AS inst_cores
# ON services.host = inst_cores.host
- topic = FLAGS.compute_topic
+ topic = CONF.compute_topic
label = 'instance_cores'
subq = model_query(context, models.Instance.host,
func.sum(models.Instance.vcpus).label(label),
@@ -417,24 +413,6 @@ def service_get_all_compute_sorted(context):
@require_admin_context
-def service_get_all_volume_sorted(context):
- session = get_session()
- with session.begin():
- topic = FLAGS.volume_topic
- label = 'volume_gigabytes'
- subq = model_query(context, models.Volume.host,
- func.sum(models.Volume.size).label(label),
- session=session, read_deleted="no").\
- group_by(models.Volume.host).\
- subquery()
- return _service_get_all_topic_subquery(context,
- session,
- topic,
- subq,
- label)
-
-
-@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
@@ -451,7 +429,7 @@ def service_get_by_args(context, host, binary):
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
- if not FLAGS.enable_new_services:
+ if not CONF.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref
@@ -503,7 +481,7 @@ def compute_node_search_by_hypervisor(context, hypervisor_match):
def _prep_stats_dict(values):
- """Make list of ComputeNodeStats"""
+ """Make list of ComputeNodeStats."""
stats = []
d = values.get('stats', {})
for k, v in d.iteritems():
@@ -563,7 +541,7 @@ def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
@require_admin_context
def compute_node_update(context, compute_id, values, prune_stats=False):
- """Updates the ComputeNode record with the most recent data"""
+ """Updates the ComputeNode record with the most recent data."""
stats = values.pop('stats', {})
session = get_session()
@@ -613,8 +591,8 @@ def compute_node_statistics(context):
@require_admin_context
-def certificate_get(context, certificate_id, session=None):
- result = model_query(context, models.Certificate, session=session).\
+def certificate_get(context, certificate_id):
+ result = model_query(context, models.Certificate).\
filter_by(id=certificate_id).\
first()
@@ -742,9 +720,7 @@ def floating_ip_bulk_destroy(context, ips):
for ip_block in _ip_range_splitter(ips):
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow()},
- synchronize_session='fetch')
+ soft_delete(synchronize_session='fetch')
@require_context
@@ -793,9 +769,12 @@ def floating_ip_fixed_ip_associate(context, floating_address,
fixed_ip_ref = fixed_ip_get_by_address(context,
fixed_address,
session=session)
+ if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
+ return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
floating_ip_ref.save(session=session)
+ return fixed_address
@require_context
@@ -987,24 +966,15 @@ def dnsdomain_register_for_project(context, fqdomain, project):
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
- session = get_session()
- with session.begin():
- session.query(models.DNSDomain).\
- filter_by(domain=fqdomain).\
- delete()
+ model_query(context, models.DNSDomain).\
+ filter_by(domain=fqdomain).\
+ delete()
@require_context
def dnsdomain_list(context):
- session = get_session()
- records = model_query(context, models.DNSDomain,
- session=session, read_deleted="no").\
- all()
- domains = []
- for record in records:
- domains.append(record.domain)
-
- return domains
+ query = model_query(context, models.DNSDomain, read_deleted="no")
+ return [row.domain for row in query.all()]
###################
@@ -1035,9 +1005,10 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
- network_id=network_id)
+ network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
- raise exception.FixedIpAlreadyInUse(address=address)
+ raise exception.FixedIpAlreadyInUse(address=address,
+ instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
@@ -1183,14 +1154,38 @@ def fixed_ip_get_by_address(context, address, session=None):
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if is_user_context(context) and result['instance_uuid'] is not None:
- instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
- result['instance_uuid'],
- session)
+ instance = _instance_get_by_uuid(context.elevated(read_deleted='yes'),
+ result['instance_uuid'],
+ session)
authorize_project_context(context, instance.project_id)
return result
+@require_admin_context
+def fixed_ip_get_by_address_detailed(context, address, session=None):
+ """
+ :returns: a tuple of (models.FixedIp, models.Network, models.Instance)
+ """
+ if not session:
+ session = get_session()
+
+ result = session.query(models.FixedIp, models.Network, models.Instance).\
+ filter_by(address=address).\
+ outerjoin((models.Network,
+ models.Network.id ==
+ models.FixedIp.network_id)).\
+ outerjoin((models.Instance,
+ models.Instance.uuid ==
+ models.FixedIp.instance_uuid)).\
+ first()
+
+ if not result:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ return result
+
+
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
@@ -1214,7 +1209,7 @@ def fixed_ip_get_by_network_host(context, network_id, host):
first()
if not result:
- raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
+ raise exception.FixedIpNotFoundForNetworkHost(network_uuid=network_id,
host=host)
return result
@@ -1228,12 +1223,6 @@ def fixed_ips_by_virtual_interface(context, vif_id):
return result
-@require_admin_context
-def fixed_ip_get_network(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.network
-
-
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
@@ -1271,12 +1260,12 @@ def _virtual_interface_query(context, session=None):
@require_context
-def virtual_interface_get(context, vif_id, session=None):
+def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
- vif_ref = _virtual_interface_query(context, session=session).\
+ vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@@ -1336,10 +1325,9 @@ def virtual_interface_delete(context, vif_id):
:param vif_id: = id of vif to delete
"""
- session = get_session()
- vif_ref = virtual_interface_get(context, vif_id, session)
- with session.begin():
- session.delete(vif_ref)
+ _virtual_interface_query(context).\
+ filter_by(id=vif_id).\
+ delete()
@require_context
@@ -1349,14 +1337,14 @@ def virtual_interface_delete_by_instance(context, instance_uuid):
:param instance_uuid: = uuid of instance
"""
- vif_refs = virtual_interface_get_by_instance(context, instance_uuid)
- for vif_ref in vif_refs:
- virtual_interface_delete(context, vif_ref['id'])
+ _virtual_interface_query(context).\
+ filter_by(instance_uuid=instance_uuid).\
+ delete()
@require_context
def virtual_interface_get_all(context):
- """Get all vifs"""
+ """Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
@@ -1375,6 +1363,34 @@ def _metadata_refs(metadata_dict, meta_class):
return metadata_refs
+def _validate_unique_server_name(context, session, name):
+ if not CONF.osapi_compute_unique_server_name_scope:
+ return
+
+ search_opts = {'deleted': False}
+ if CONF.osapi_compute_unique_server_name_scope == 'project':
+ search_opts['project_id'] = context.project_id
+ instance_list = instance_get_all_by_filters(context, search_opts,
+ 'created_at', 'desc',
+ session=session)
+ elif CONF.osapi_compute_unique_server_name_scope == 'global':
+ instance_list = instance_get_all_by_filters(context.elevated(),
+ search_opts,
+ 'created_at', 'desc',
+ session=session)
+ else:
+ msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
+ ' Flag must be empty, "global" or'
+ ' "project"') % CONF.osapi_compute_unique_server_name_scope
+ LOG.warn(msg)
+ return
+
+ lowername = name.lower()
+ for instance in instance_list:
+ if instance['hostname'].lower() == lowername:
+ raise exception.InstanceExists(name=instance['hostname'])
+
+
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
@@ -1391,7 +1407,7 @@ def instance_create(context, values):
instance_ref = models.Instance()
if not values.get('uuid'):
- values['uuid'] = str(utils.gen_uuid())
+ values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
@@ -1414,6 +1430,8 @@ def instance_create(context, values):
session = get_session()
with session.begin():
+ if 'hostname' in values:
+ _validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
instance_ref.save(session=session)
@@ -1422,7 +1440,7 @@ def instance_create(context, values):
instance_ref.instance_type
# create the instance uuid to ec2_id mapping entry for instance
- ec2_instance_create(context, instance_ref['uuid'])
+ db.ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
@@ -1446,33 +1464,35 @@ def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
- instance_ref = instance_get_by_uuid(context, instance_uuid,
+ instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = session.query(models.Instance).\
- filter_by(uuid=instance_ref['uuid'])
+ filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
- count = query.update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
session.query(models.SecurityGroupInstanceAssociation).\
- filter_by(instance_uuid=instance_ref['uuid']).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
- instance_info_cache_delete(context, instance_ref['uuid'],
- session=session)
+ session.query(models.InstanceInfoCache).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
return instance_ref
@require_context
-def instance_get_by_uuid(context, uuid, session=None):
+def instance_get_by_uuid(context, uuid):
+ return _instance_get_by_uuid(context, uuid)
+
+
+@require_context
+def _instance_get_by_uuid(context, uuid, session=None):
result = _build_instance_get(context, session=session).\
filter_by(uuid=uuid).\
first()
@@ -1484,8 +1504,8 @@ def instance_get_by_uuid(context, uuid, session=None):
@require_context
-def instance_get(context, instance_id, session=None):
- result = _build_instance_get(context, session=session).\
+def instance_get(context, instance_id):
+ result = _build_instance_get(context).\
filter_by(id=instance_id).\
first()
@@ -1502,7 +1522,8 @@ def _build_instance_get(context, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type'))
+ options(joinedload('instance_type')).\
+ options(joinedload('system_metadata'))
@require_context
@@ -1524,17 +1545,20 @@ def instance_get_all(context, columns_to_join=None):
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
- limit=None, marker=None):
+ limit=None, marker=None, session=None):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise"""
sort_fn = {'desc': desc, 'asc': asc}
- session = get_session()
+ if not session:
+ session = get_session()
+
query_prefix = session.query(models.Instance).\
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
+ options(joinedload('system_metadata')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))
@@ -1582,7 +1606,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# paginate query
if marker is not None:
try:
- marker = instance_get_by_uuid(context, marker, session=session)
+ marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = paginate_query(query_prefix, models.Instance, limit,
@@ -1610,7 +1634,7 @@ def regex_filter(query, model, filters):
'oracle': 'REGEXP_LIKE',
'sqlite': 'REGEXP'
}
- db_string = FLAGS.sql_connection.split(':')[0].split('+')[0]
+ db_string = CONF.sql_connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
@@ -1681,6 +1705,12 @@ def instance_get_all_by_host(context, host):
@require_admin_context
+def instance_get_all_by_host_and_node(context, host, node):
+ return _instance_get_all_query(context).filter_by(host=host).\
+ filter_by(node=node).all()
+
+
+@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instance_get_all_query(context).filter_by(host=host).\
filter(models.Instance.instance_type_id != type_id).all()
@@ -1738,52 +1768,13 @@ def instance_floating_address_get_all(context, instance_uuid):
@require_admin_context
-def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
+def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
- if not session:
- session = get_session()
-
- results = session.query(models.Instance).\
+ return model_query(context, models.Instance).\
filter(models.Instance.updated_at <= reboot_window).\
- filter_by(task_state="rebooting").all()
-
- return results
-
-
-@require_context
-def instance_test_and_set(context, instance_uuid, attr, ok_states,
- new_state, session=None):
- """Atomically check if an instance is in a valid state, and if it is, set
- the instance into a new state.
- """
- if not session:
- session = get_session()
-
- with session.begin():
- query = model_query(context, models.Instance, session=session,
- project_only=True)
-
- if uuidutils.is_uuid_like(instance_uuid):
- query = query.filter_by(uuid=instance_uuid)
- else:
- raise exception.InvalidUUID(instance_uuid)
-
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- instance = query.with_lockmode('update').first()
-
- state = instance[attr]
- if state not in ok_states:
- raise exception.InstanceInvalidState(
- attr=attr,
- instance_uuid=instance['uuid'],
- state=state,
- method='instance_test_and_set')
-
- instance[attr] = new_state
- instance.save(session=session)
+ filter_by(task_state=task_states.REBOOTING).all()
@require_context
@@ -1814,6 +1805,30 @@ def instance_update_and_get_original(context, instance_uuid, values):
copy_old_instance=True)
+# NOTE(danms): This updates the instance's metadata list in-place and in
+# the database to avoid stale data and refresh issues. It assumes the
+# delete=True behavior of instance_metadata_update(...)
+def _instance_metadata_update_in_place(context, instance, metadata, session):
+ to_delete = []
+ for keyvalue in instance['metadata']:
+ key = keyvalue['key']
+ if key in metadata:
+ keyvalue['value'] = metadata.pop(key)
+ elif key not in metadata:
+ to_delete.append(keyvalue)
+
+ for condemned in to_delete:
+ instance['metadata'].remove(condemned)
+ condemned.soft_delete(session=session)
+
+ for key, value in metadata.iteritems():
+ newitem = models.InstanceMetadata()
+ newitem.update({'key': key, 'value': value,
+ 'instance_uuid': instance['uuid']})
+ session.add(newitem)
+ instance['metadata'].append(newitem)
+
+
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
session = get_session()
@@ -1821,8 +1836,20 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
- instance_ref = instance_get_by_uuid(context, instance_uuid,
- session=session)
+ instance_ref = _instance_get_by_uuid(context, instance_uuid,
+ session=session)
+ # TODO(deva): remove extra_specs from here after it is included
+ # in system_metadata. Until then, the baremetal driver
+ # needs extra_specs added to instance[]
+ inst_type_ref = _instance_type_get_query(context, session=session).\
+ filter_by(id=instance_ref['instance_type_id']).\
+ first()
+ if inst_type_ref:
+ instance_ref['extra_specs'] = \
+ _dict_with_extra_specs(inst_type_ref).get('extra_specs', {})
+ else:
+ instance_ref['extra_specs'] = {}
+
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
@@ -1833,6 +1860,12 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
raise exception.UnexpectedTaskStateError(actual=actual_state,
expected=expected)
+ if ("hostname" in values and
+ values["hostname"].lower() != instance_ref["hostname"].lower()):
+ _validate_unique_server_name(context,
+ session,
+ values['hostname'])
+
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
@@ -1840,9 +1873,9 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
metadata = values.get('metadata')
if metadata is not None:
- instance_metadata_update(context, instance_ref['uuid'],
- values.pop('metadata'), True,
- session=session)
+ _instance_metadata_update_in_place(context, instance_ref,
+ values.pop('metadata'),
+ session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
@@ -1852,109 +1885,86 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
instance_ref.update(values)
instance_ref.save(session=session)
+ if 'instance_type_id' in values:
+ # NOTE(comstud): It appears that sqlalchemy doesn't refresh
+ # the instance_type model after you update the ID. You end
+ # up with an instance_type model that only has 'id' updated,
+ # but the rest of the model has the data from the old
+ # instance_type.
+ session.refresh(instance_ref['instance_type'])
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
- """Associate the given security group with the given instance"""
- session = get_session()
- with session.begin():
- instance_ref = instance_get_by_uuid(context, instance_uuid,
- session=session)
- security_group_ref = security_group_get(context,
- security_group_id,
- session=session)
- instance_ref.security_groups += [security_group_ref]
- instance_ref.save(session=session)
+ """Associate the given security group with the given instance."""
+ sec_group_ref = models.SecurityGroupInstanceAssociation()
+ sec_group_ref.update({'instance_uuid': instance_uuid,
+ 'security_group_id': security_group_id})
+ sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
- """Disassociate the given security group from the given instance"""
- session = get_session()
- instance_ref = instance_get_by_uuid(context, instance_uuid,
- session=session)
- session.query(models.SecurityGroupInstanceAssociation).\
- filter_by(instance_uuid=instance_ref['uuid']).\
+ """Disassociate the given security group from the given instance."""
+ model_query(context, models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
###################
@require_context
-def instance_info_cache_create(context, values):
- """Create a new instance cache record in the table.
-
- :param context: = request context object
- :param values: = dict containing column values
- """
- info_cache = models.InstanceInfoCache()
- info_cache.update(values)
-
- session = get_session()
- with session.begin():
- info_cache.save(session=session)
- return info_cache
-
-
-@require_context
-def instance_info_cache_get(context, instance_uuid, session=None):
+def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
:param session: = optional session object
"""
- session = session or get_session()
-
- info_cache = session.query(models.InstanceInfoCache).\
+ return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
- return info_cache
@require_context
-def instance_info_cache_update(context, instance_uuid, values,
- session=None):
+def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
:param session: = optional session object
"""
- session = session or get_session()
- info_cache = instance_info_cache_get(context, instance_uuid,
- session=session)
- if info_cache:
- # NOTE(tr3buchet): let's leave it alone if it's already deleted
- if info_cache['deleted']:
- return info_cache
-
- info_cache.update(values)
- info_cache.save(session=session)
- else:
- # NOTE(tr3buchet): just in case someone blows away an instance's
- # cache entry
- values['instance_uuid'] = instance_uuid
- info_cache = instance_info_cache_create(context, values)
+ session = get_session()
+ with session.begin():
+ info_cache = model_query(context, models.InstanceInfoCache,
+ session=session).\
+ filter_by(instance_uuid=instance_uuid).\
+ first()
+
+ if info_cache and not info_cache['deleted']:
+ # NOTE(tr3buchet): let's leave it alone if it's already deleted
+ info_cache.update(values)
+ else:
+ # NOTE(tr3buchet): just in case someone blows away an instance's
+ # cache entry
+ info_cache = models.InstanceInfoCache()
+ info_cache.update({'instance_uuid': instance_uuid})
return info_cache
@require_context
-def instance_info_cache_delete(context, instance_uuid, session=None):
+def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
:param session: = optional session object
"""
- values = {'deleted': True,
- 'deleted_at': timeutils.utcnow()}
- instance_info_cache_update(context, instance_uuid, values, session)
+ model_query(context, models.InstanceInfoCache).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
###################
@@ -1971,28 +1981,16 @@ def key_pair_create(context, values):
@require_context
def key_pair_destroy(context, user_id, name):
authorize_user_context(context, user_id)
- session = get_session()
- with session.begin():
- key_pair_ref = key_pair_get(context, user_id, name, session=session)
- key_pair_ref.delete(session=session)
+ model_query(context, models.KeyPair).\
+ filter_by(user_id=user_id).\
+ filter_by(name=name).\
+ delete()
@require_context
-def key_pair_destroy_all_by_user(context, user_id):
+def key_pair_get(context, user_id, name):
authorize_user_context(context, user_id)
- session = get_session()
- with session.begin():
- session.query(models.KeyPair).\
- filter_by(user_id=user_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def key_pair_get(context, user_id, name, session=None):
- authorize_user_context(context, user_id)
- result = model_query(context, models.KeyPair, session=session).\
+ result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
@@ -2072,11 +2070,6 @@ def network_associate(context, project_id, network_id=None, force=False):
@require_admin_context
-def network_count(context):
- return model_query(context, models.Network).count()
-
-
-@require_admin_context
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@@ -2098,7 +2091,7 @@ def network_create_safe(context, values):
raise exception.DuplicateVlan(vlan=values['vlan'])
network_ref = models.Network()
- network_ref['uuid'] = str(utils.gen_uuid())
+ network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
@@ -2116,24 +2109,27 @@ def network_delete_safe(context, network_id):
filter_by(network_id=network_id).\
filter_by(deleted=False).\
filter_by(allocated=True).\
- all()
- if result:
+ count()
+ if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = network_get(context, network_id=network_id,
session=session)
session.query(models.FixedIp).\
filter_by(network_id=network_id).\
filter_by(deleted=False).\
- update({'deleted': True,
- 'updated_at': literal_column('updated_at'),
- 'deleted_at': timeutils.utcnow()})
+ soft_delete()
session.delete(network_ref)
@require_admin_context
-def network_disassociate(context, network_id):
- network_update(context, network_id, {'project_id': None,
- 'host': None})
+def network_disassociate(context, network_id, disassociate_host,
+ disassociate_project):
+ net_update = {}
+ if disassociate_project:
+ net_update['project_id'] = None
+ if disassociate_host:
+ net_update['host'] = None
+ network_update(context, network_id, net_update)
@require_context
@@ -2237,6 +2233,11 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
return data
+def network_in_use_on_host(context, network_id, host):
+ fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
+ return len(fixed_ips) > 0
+
+
@require_admin_context
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
@@ -2305,11 +2306,20 @@ def network_get_all_by_instance(context, instance_id):
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
+ fixed_host_filter = or_(models.FixedIp.host == host,
+ models.Instance.host == host)
fixed_ip_query = model_query(context, models.FixedIp.network_id,
session=session).\
- filter(models.FixedIp.host == host)
+ outerjoin((models.VirtualInterface,
+ models.VirtualInterface.id ==
+ models.FixedIp.virtual_interface_id)).\
+ outerjoin((models.Instance,
+ models.Instance.uuid ==
+ models.VirtualInterface.instance_uuid)).\
+ filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
+ # or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
@@ -2375,9 +2385,8 @@ def iscsi_target_create_safe(context, values):
@require_context
-def quota_get(context, project_id, resource, session=None):
- result = model_query(context, models.Quota, session=session,
- read_deleted="no").\
+def quota_get(context, project_id, resource):
+ result = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
@@ -2415,28 +2424,21 @@ def quota_create(context, project_id, resource, limit):
@require_admin_context
def quota_update(context, project_id, resource, limit):
- session = get_session()
- with session.begin():
- quota_ref = quota_get(context, project_id, resource, session=session)
- quota_ref.hard_limit = limit
- quota_ref.save(session=session)
-
+ result = model_query(context, models.Quota, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ update({'hard_limit': limit})
-@require_admin_context
-def quota_destroy(context, project_id, resource):
- session = get_session()
- with session.begin():
- quota_ref = quota_get(context, project_id, resource, session=session)
- quota_ref.delete(session=session)
+ if not result:
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
-def quota_class_get(context, class_name, resource, session=None):
- result = model_query(context, models.QuotaClass, session=session,
- read_deleted="no").\
+def quota_class_get(context, class_name, resource):
+ result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
@@ -2474,43 +2476,21 @@ def quota_class_create(context, class_name, resource, limit):
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
- session = get_session()
- with session.begin():
- quota_class_ref = quota_class_get(context, class_name, resource,
- session=session)
- quota_class_ref.hard_limit = limit
- quota_class_ref.save(session=session)
-
-
-@require_admin_context
-def quota_class_destroy(context, class_name, resource):
- session = get_session()
- with session.begin():
- quota_class_ref = quota_class_get(context, class_name, resource,
- session=session)
- quota_class_ref.delete(session=session)
-
-
-@require_admin_context
-def quota_class_destroy_all_by_name(context, class_name):
- session = get_session()
- with session.begin():
- quota_classes = model_query(context, models.QuotaClass,
- session=session, read_deleted="no").\
- filter_by(class_name=class_name).\
- all()
+ result = model_query(context, models.QuotaClass, read_deleted="no").\
+ filter_by(class_name=class_name).\
+ filter_by(resource=resource).\
+ update({'hard_limit': limit})
- for quota_class_ref in quota_classes:
- quota_class_ref.delete(session=session)
+ if not result:
+ raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
-def quota_usage_get(context, project_id, resource, session=None):
- result = model_query(context, models.QuotaUsage, session=session,
- read_deleted="no").\
+def quota_usage_get(context, project_id, resource):
+ result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
@@ -2537,7 +2517,7 @@ def quota_usage_get_all_by_project(context, project_id):
@require_admin_context
-def quota_usage_create(context, project_id, resource, in_use, reserved,
+def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
@@ -2545,49 +2525,37 @@ def quota_usage_create(context, project_id, resource, in_use, reserved,
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
+
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
-def quota_usage_update(context, project_id, resource, session=None, **kwargs):
- def do_update(session):
- quota_usage_ref = quota_usage_get(context, project_id, resource,
- session=session)
- if 'in_use' in kwargs:
- quota_usage_ref.in_use = kwargs['in_use']
- if 'reserved' in kwargs:
- quota_usage_ref.reserved = kwargs['reserved']
- if 'until_refresh' in kwargs:
- quota_usage_ref.until_refresh = kwargs['until_refresh']
- quota_usage_ref.save(session=session)
-
- if session:
- # Assume caller started a transaction
- do_update(session)
- else:
- session = get_session()
- with session.begin():
- do_update(session)
+def quota_usage_update(context, project_id, resource, **kwargs):
+ updates = {}
+ if 'in_use' in kwargs:
+ updates['in_use'] = kwargs['in_use']
+ if 'reserved' in kwargs:
+ updates['reserved'] = kwargs['reserved']
+ if 'until_refresh' in kwargs:
+ updates['until_refresh'] = kwargs['until_refresh']
+ result = model_query(context, models.QuotaUsage, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ update(updates)
-@require_admin_context
-def quota_usage_destroy(context, project_id, resource):
- session = get_session()
- with session.begin():
- quota_usage_ref = quota_usage_get(context, project_id, resource,
- session=session)
- quota_usage_ref.delete(session=session)
+ if not result:
+ raise exception.QuotaUsageNotFound(project_id=project_id)
###################
@require_context
-def reservation_get(context, uuid, session=None):
- result = model_query(context, models.Reservation, session=session,
- read_deleted="no").\
+def reservation_get(context, uuid):
+ result = model_query(context, models.Reservation, read_deleted="no").\
filter_by(uuid=uuid).\
first()
@@ -2597,22 +2565,6 @@ def reservation_get(context, uuid, session=None):
return result
-@require_context
-def reservation_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
-
- rows = model_query(context, models.QuotaUsage, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
-
- result = {'project_id': project_id}
- for row in rows:
- result.setdefault(row.resource, {})
- result[row.resource][row.uuid] = row.delta
-
- return result
-
-
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None):
@@ -2629,10 +2581,12 @@ def reservation_create(context, uuid, usage, project_id, resource, delta,
@require_admin_context
def reservation_destroy(context, uuid):
- session = get_session()
- with session.begin():
- reservation_ref = reservation_get(context, uuid, session=session)
- reservation_ref.delete(session=session)
+ result = model_query(context, models.Reservation, read_deleted="no").\
+ filter_by(uuid=uuid).\
+ delete()
+
+ if not result:
+ raise exception.ReservationNotFound(uuid=uuid)
###################
@@ -2643,12 +2597,12 @@ def reservation_destroy(context, uuid):
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
-def _get_quota_usages(context, session):
+def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
- filter_by(project_id=context.project_id).\
+ filter_by(project_id=project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
@@ -2656,12 +2610,16 @@ def _get_quota_usages(context, session):
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
+
+ if project_id is None:
+ project_id = context.project_id
+
# Get the current usages
- usages = _get_quota_usages(context, session)
+ usages = _get_quota_usages(context, session, project_id)
# Handle usage refresh
work = set(deltas.keys())
@@ -2671,8 +2629,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
- usages[resource] = quota_usage_create(elevated,
- context.project_id,
+ usages[resource] = _quota_usage_create(elevated,
+ project_id,
resource,
0, 0,
until_refresh or None,
@@ -2695,12 +2653,12 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Grab the sync routine
sync = resources[resource].sync
- updates = sync(elevated, context.project_id, session)
+ updates = sync(elevated, project_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
- usages[res] = quota_usage_create(elevated,
- context.project_id,
+ usages[res] = _quota_usage_create(elevated,
+ project_id,
res,
0, 0,
until_refresh or None,
@@ -2748,9 +2706,9 @@ def quota_reserve(context, resources, quotas, deltas, expire,
reservations = []
for resource, delta in deltas.items():
reservation = reservation_create(elevated,
- str(utils.gen_uuid()),
+ str(uuid.uuid4()),
usages[resource],
- context.project_id,
+ project_id,
resource, delta, expire,
session=session)
reservations.append(reservation.uuid)
@@ -2786,7 +2744,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
return reservations
-def _quota_reservations(session, context, reservations):
+def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
@@ -2794,72 +2752,56 @@ def _quota_reservations(session, context, reservations):
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
- with_lockmode('update').\
- all()
+ with_lockmode('update')
@require_context
-def reservation_commit(context, reservations):
+def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
-
- for reservation in _quota_reservations(session, context, reservations):
+ usages = _get_quota_usages(context, session, project_id)
+ reservation_query = _quota_reservations_query(session, context,
+ reservations)
+ for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
-
- reservation.delete(session=session)
-
- for usage in usages.values():
- usage.save(session=session)
+ reservation_query.soft_delete(synchronize_session=False)
@require_context
-def reservation_rollback(context, reservations):
+def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
- usages = _get_quota_usages(context, session)
-
- for reservation in _quota_reservations(session, context, reservations):
+ usages = _get_quota_usages(context, session, project_id)
+ reservation_query = _quota_reservations_query(session, context,
+ reservations)
+ for reservation in reservation_query.all():
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
-
- reservation.delete(session=session)
-
- for usage in usages.values():
- usage.save(session=session)
+ reservation_query.soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
- quotas = model_query(context, models.Quota, session=session,
- read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
-
- for quota_ref in quotas:
- quota_ref.delete(session=session)
+ model_query(context, models.Quota, session=session,
+ read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
- quota_usages = model_query(context, models.QuotaUsage,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
-
- for quota_usage_ref in quota_usages:
- quota_usage_ref.delete(session=session)
-
- reservations = model_query(context, models.Reservation,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ model_query(context, models.QuotaUsage,
+ session=session, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
- for reservation_ref in reservations:
- reservation_ref.delete(session=session)
+ model_query(context, models.Reservation,
+ session=session, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
@require_admin_context
@@ -2867,133 +2809,21 @@ def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
- results = model_query(context, models.Reservation, session=session,
- read_deleted="no").\
- filter(models.Reservation.expire < current_time).\
- all()
+ reservation_query = model_query(context, models.Reservation,
+ session=session, read_deleted="no").\
+ filter(models.Reservation.expire < current_time)
- if results:
- for reservation in results:
- if reservation.delta >= 0:
- reservation.usage.reserved -= reservation.delta
- reservation.usage.save(session=session)
+ for reservation in reservation_query.join(models.QuotaUsage).all():
+ if reservation.delta >= 0:
+ reservation.usage.reserved -= reservation.delta
+ reservation.usage.save(session=session)
- reservation.delete(session=session)
+ reservation_query.soft_delete(synchronize_session=False)
###################
-@require_admin_context
-def volume_allocate_iscsi_target(context, volume_id, host):
- session = get_session()
- with session.begin():
- iscsi_target_ref = model_query(context, models.IscsiTarget,
- session=session, read_deleted="no").\
- filter_by(volume=None).\
- filter_by(host=host).\
- with_lockmode('update').\
- first()
-
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- if not iscsi_target_ref:
- raise db.NoMoreTargets()
-
- iscsi_target_ref.volume_id = volume_id
- session.add(iscsi_target_ref)
-
- return iscsi_target_ref.target_num
-
-
-@require_admin_context
-def volume_attached(context, volume_id, instance_uuid, mountpoint):
- if not uuidutils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- volume_ref['status'] = 'in-use'
- volume_ref['mountpoint'] = mountpoint
- volume_ref['attach_status'] = 'attached'
- volume_ref['instance_uuid'] = instance_uuid
- volume_ref['attach_time'] = timeutils.utcnow()
- volume_ref.save(session=session)
-
-
-@require_context
-def volume_create(context, values):
- values['volume_metadata'] = _metadata_refs(values.get('metadata'),
- models.VolumeMetadata)
- volume_ref = models.Volume()
- if not values.get('id'):
- values['id'] = str(utils.gen_uuid())
- volume_ref.update(values)
-
- session = get_session()
- with session.begin():
- volume_ref.save(session=session)
-
- return volume_get(context, values['id'], session=session)
-
-
-@require_admin_context
-def volume_data_get_for_project(context, project_id, session=None):
- result = model_query(context,
- func.count(models.Volume.id),
- func.sum(models.Volume.size),
- read_deleted="no",
- session=session).\
- filter_by(project_id=project_id).\
- first()
-
- # NOTE(vish): convert None to 0
- return (result[0] or 0, result[1] or 0)
-
-
-@require_admin_context
-def volume_destroy(context, volume_id):
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- session.query(models.Volume).\
- filter_by(id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- session.query(models.IscsiTarget).\
- filter_by(volume_id=volume_id).\
- update({'volume_id': None})
- session.query(models.VolumeMetadata).\
- filter_by(volume_id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- return volume_ref
-
-
-@require_admin_context
-def volume_detached(context, volume_id):
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- volume_ref['status'] = 'available'
- volume_ref['mountpoint'] = None
- volume_ref['attach_status'] = 'detached'
- volume_ref['instance_uuid'] = None
- volume_ref['attach_time'] = None
- volume_ref.save(session=session)
-
-
-@require_context
-def _volume_get_query(context, session=None, project_only=False):
- return model_query(context, models.Volume, session=session,
- project_only=project_only).\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type'))
-
-
@require_context
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
@@ -3006,48 +2836,6 @@ def _ec2_snapshot_get_query(context, session=None):
session=session, read_deleted='yes')
-@require_context
-def volume_get(context, volume_id, session=None):
- result = _volume_get_query(context, session=session, project_only=True).\
- filter_by(id=volume_id).\
- first()
-
- if not result:
- raise exception.VolumeNotFound(volume_id=volume_id)
-
- return result
-
-
-@require_admin_context
-def volume_get_all(context):
- return _volume_get_query(context).all()
-
-
-@require_admin_context
-def volume_get_all_by_host(context, host):
- return _volume_get_query(context).filter_by(host=host).all()
-
-
-@require_admin_context
-def volume_get_all_by_instance_uuid(context, instance_uuid):
- result = model_query(context, models.Volume, read_deleted="no").\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type')).\
- filter_by(instance_uuid=instance_uuid).\
- all()
-
- if not result:
- return []
-
- return result
-
-
-@require_context
-def volume_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
- return _volume_get_query(context).filter_by(project_id=project_id).all()
-
-
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
@@ -3061,25 +2849,8 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
-def volume_update(context, volume_id, values):
- session = get_session()
- volume_ref = volume_get(context, volume_id, session=session)
- metadata = values.get('metadata')
- if metadata is not None:
- volume_metadata_update(context,
- volume_id,
- values.pop('metadata'),
- delete=True)
- with session.begin():
- volume_ref.update(values)
- volume_ref.save(session=session)
-
- return volume_ref
-
-
-@require_context
def ec2_volume_create(context, volume_uuid, id=None):
- """Create ec2 compatable volume by provided uuid"""
+ """Create ec2 compatable volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
@@ -3116,7 +2887,7 @@ def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
- """Create ec2 compatable snapshot by provided uuid"""
+ """Create ec2 compatable snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
@@ -3151,153 +2922,6 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
return result['uuid']
-####################
-
-def _volume_metadata_get_query(context, volume_id, session=None):
- return model_query(context, models.VolumeMetadata,
- session=session, read_deleted="no").\
- filter_by(volume_id=volume_id)
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_get(context, volume_id):
- rows = _volume_metadata_get_query(context, volume_id).all()
- result = {}
- for row in rows:
- result[row['key']] = row['value']
-
- return result
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_delete(context, volume_id, key):
- _volume_metadata_get_query(context, volume_id).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_get_item(context, volume_id, key, session=None):
- result = _volume_metadata_get_query(context, volume_id, session=session).\
- filter_by(key=key).\
- first()
-
- if not result:
- raise exception.VolumeMetadataNotFound(metadata_key=key,
- volume_id=volume_id)
- return result
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_update(context, volume_id, metadata, delete):
- session = get_session()
-
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = volume_metadata_get(context, volume_id)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = volume_metadata_get_item(context, volume_id,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
-
- # update the value whether it exists or not
- item = {"value": meta_value}
-
- try:
- meta_ref = volume_metadata_get_item(context, volume_id,
- meta_key, session)
- except exception.VolumeMetadataNotFound:
- meta_ref = models.VolumeMetadata()
- item.update({"key": meta_key, "volume_id": volume_id})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
-
- return metadata
-
-
-###################
-
-
-@require_context
-def snapshot_create(context, values):
- snapshot_ref = models.Snapshot()
- if not values.get('id'):
- values['id'] = str(utils.gen_uuid())
- snapshot_ref.update(values)
-
- session = get_session()
- with session.begin():
- snapshot_ref.save(session=session)
- return snapshot_ref
-
-
-@require_admin_context
-def snapshot_destroy(context, snapshot_id):
- session = get_session()
- with session.begin():
- session.query(models.Snapshot).\
- filter_by(id=snapshot_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def snapshot_get(context, snapshot_id, session=None):
- result = model_query(context, models.Snapshot, session=session,
- project_only=True).\
- filter_by(id=snapshot_id).\
- first()
-
- if not result:
- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
-
- return result
-
-
-@require_admin_context
-def snapshot_get_all(context):
- return model_query(context, models.Snapshot).all()
-
-
-@require_context
-def snapshot_get_all_for_volume(context, volume_id):
- return model_query(context, models.Snapshot, read_deleted='no',
- project_only=True).\
- filter_by(volume_id=volume_id).all()
-
-
-@require_context
-def snapshot_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
- return model_query(context, models.Snapshot).\
- filter_by(project_id=project_id).\
- all()
-
-
-@require_context
-def snapshot_update(context, snapshot_id, values):
- session = get_session()
- with session.begin():
- snapshot_ref = snapshot_get(context, snapshot_id, session=session)
- snapshot_ref.update(values)
- snapshot_ref.save(session=session)
-
-
###################
@@ -3349,9 +2973,7 @@ def block_device_mapping_update_or_create(context, values):
filter_by(virtual_name=virtual_name).\
filter(models.BlockDeviceMapping.device_name !=
values['device_name']).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -3367,9 +2989,7 @@ def block_device_mapping_destroy(context, bdm_id):
with session.begin():
session.query(models.BlockDeviceMapping).\
filter_by(id=bdm_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -3380,9 +3000,7 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
_block_device_mapping_get_query(context, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -3393,9 +3011,7 @@ def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
_block_device_mapping_get_query(context, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
###################
@@ -3561,25 +3177,16 @@ def security_group_destroy(context, security_group_id):
with session.begin():
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
+ soft_delete()
session.query(models.SecurityGroupIngressRule).\
filter_by(parent_group_id=security_group_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -3642,10 +3249,12 @@ def security_group_rule_create(context, values):
def security_group_rule_destroy(context, security_group_rule_id):
session = get_session()
with session.begin():
- security_group_rule = security_group_rule_get(context,
- security_group_rule_id,
- session=session)
- security_group_rule.delete(session=session)
+ count = _security_group_rule_get_query(context, session=session).\
+ filter_by(id=security_group_rule_id).\
+ soft_delete()
+ if count == 0:
+ raise exception.SecurityGroupNotFoundForRule(
+ rule_id=security_group_rule_id)
@require_context
@@ -3678,9 +3287,7 @@ def provider_fw_rule_destroy(context, rule_id):
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
###################
@@ -3766,6 +3373,20 @@ def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
all()
+@require_admin_context
+def migration_get_in_progress_by_host_and_node(context, host, node,
+ session=None):
+
+ return model_query(context, models.Migration, session=session).\
+ filter(or_(and_(models.Migration.source_compute == host,
+ models.Migration.source_node == node),
+ and_(models.Migration.dest_compute == host,
+ models.Migration.dest_node == node))).\
+ filter(~models.Migration.status.in_(['confirmed', 'reverted'])).\
+ options(joinedload('instance')).\
+ all()
+
+
##################
@@ -3901,7 +3522,7 @@ def instance_type_create(context, values):
def _dict_with_extra_specs(inst_type_query):
- """Takes an instance, volume, or instance type query returned
+ """Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
@@ -3972,7 +3593,7 @@ def instance_type_get_all(context, inactive=False, filters=None):
@require_context
def instance_type_get(context, id, session=None):
- """Returns a dict describing specific instance_type"""
+ """Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context, session=session).\
filter_by(id=id).\
first()
@@ -3985,7 +3606,7 @@ def instance_type_get(context, id, session=None):
@require_context
def instance_type_get_by_name(context, name, session=None):
- """Returns a dict describing specific instance_type"""
+ """Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context, session=session).\
filter_by(name=name).\
first()
@@ -3998,7 +3619,7 @@ def instance_type_get_by_name(context, name, session=None):
@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
- """Returns a dict describing specific flavor_id"""
+ """Returns a dict describing specific flavor_id."""
result = _instance_type_get_query(context, session=session).\
filter_by(flavorid=flavor_id).\
first()
@@ -4011,7 +3632,7 @@ def instance_type_get_by_flavor_id(context, flavor_id, session=None):
@require_admin_context
def instance_type_destroy(context, name):
- """Marks specific instance_type as deleted"""
+ """Marks specific instance_type as deleted."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_name(context, name,
@@ -4019,14 +3640,10 @@ def instance_type_destroy(context, name):
instance_type_id = instance_type_ref['id']
session.query(models.InstanceTypes).\
filter_by(id=instance_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -4037,7 +3654,7 @@ def _instance_type_access_query(context, session=None):
@require_admin_context
def instance_type_access_get_by_flavor_id(context, flavor_id):
- """Get flavor access list by flavor id"""
+ """Get flavor access list by flavor id."""
instance_type_ref = _instance_type_get_query(context).\
filter_by(flavorid=flavor_id).\
first()
@@ -4047,7 +3664,7 @@ def instance_type_access_get_by_flavor_id(context, flavor_id):
@require_admin_context
def instance_type_access_add(context, flavor_id, project_id):
- """Add given tenant to the flavor access list"""
+ """Add given tenant to the flavor access list."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
@@ -4075,25 +3692,65 @@ def instance_type_access_add(context, flavor_id, project_id):
@require_admin_context
def instance_type_access_remove(context, flavor_id, project_id):
- """Remove given tenant from the flavor access list"""
+ """Remove given tenant from the flavor access list."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
session=session)
instance_type_id = instance_type_ref['id']
- access_ref = _instance_type_access_query(context, session=session).\
+ count = _instance_type_access_query(context, session=session).\
filter_by(instance_type_id=instance_type_id).\
- filter_by(project_id=project_id).first()
+ filter_by(project_id=project_id).\
+ soft_delete()
- if access_ref:
- access_ref.update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- else:
+ if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
+####################
+
+
+@require_admin_context
+def cell_create(context, values):
+ cell = models.Cell()
+ cell.update(values)
+ cell.save()
+ return cell
+
+
+def _cell_get_by_name_query(context, cell_name, session=None):
+ return model_query(context, models.Cell,
+ session=session).filter_by(name=cell_name)
+
+
+@require_admin_context
+def cell_update(context, cell_name, values):
+ session = get_session()
+ with session.begin():
+ cell = _cell_get_by_name_query(context, cell_name, session=session)
+ cell.update(values)
+ return cell
+
+
+@require_admin_context
+def cell_delete(context, cell_name):
+ return _cell_get_by_name_query(context, cell_name).soft_delete()
+
+
+@require_admin_context
+def cell_get(context, cell_name):
+ result = _cell_get_by_name_query(context, cell_name).first()
+ if not result:
+ raise exception.CellNotFound(cell_name=cell_name)
+ return result
+
+
+@require_admin_context
+def cell_get_all(context):
+ return model_query(context, models.Cell, read_deleted="no").all()
+
+
########################
# User-provided metadata
@@ -4119,9 +3776,7 @@ def instance_metadata_get(context, instance_uuid, session=None):
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
@require_context
@@ -4141,43 +3796,42 @@ def instance_metadata_get_item(context, instance_uuid, key, session=None):
@require_context
def instance_metadata_update(context, instance_uuid, metadata, delete,
session=None):
+ all_keys = metadata.keys()
+ synchronize_session = "fetch"
if session is None:
session = get_session()
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = instance_metadata_get(context, instance_uuid,
- session=session)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = instance_metadata_get_item(context, instance_uuid,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
+ synchronize_session = False
+ with session.begin(subtransactions=True):
+ if delete:
+ _instance_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(~models.InstanceMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=synchronize_session)
+
+ already_existing_keys = []
+ meta_refs = _instance_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(models.InstanceMetadata.key.in_(all_keys)).\
+ all()
- # update the value whether it exists or not
- item = {"value": meta_value}
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- try:
- meta_ref = instance_metadata_get_item(context, instance_uuid,
- meta_key, session)
- except exception.InstanceMetadataNotFound:
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
meta_ref = models.InstanceMetadata()
- item.update({"key": meta_key, "instance_uuid": instance_uuid})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ session.add(meta_ref)
- return metadata
+ return metadata
#######################
# System-owned metadata
+
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
@@ -4196,15 +3850,6 @@ def instance_system_metadata_get(context, instance_uuid, session=None):
return result
-@require_context
-def instance_system_metadata_delete(context, instance_uuid, key):
- _instance_system_metadata_get_query(context, instance_uuid).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
def _instance_system_metadata_get_item(context, instance_uuid, key,
session=None):
result = _instance_system_metadata_get_query(
@@ -4222,39 +3867,36 @@ def _instance_system_metadata_get_item(context, instance_uuid, key,
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete,
session=None):
+ all_keys = metadata.keys()
+ synchronize_session = "fetch"
if session is None:
session = get_session()
+ synchronize_session = False
+ with session.begin(subtransactions=True):
+ if delete:
+ _instance_system_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=synchronize_session)
+
+ already_existing_keys = []
+ meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
+ session=session).\
+ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
+ all()
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = instance_system_metadata_get(
- context, instance_uuid, session=session)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = _instance_system_metadata_get_item(
- context, instance_uuid, meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
-
- # update the value whether it exists or not
- item = {"value": meta_value}
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- try:
- meta_ref = _instance_system_metadata_get_item(
- context, instance_uuid, meta_key, session)
- except exception.InstanceSystemMetadataNotFound:
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
- item.update({"key": meta_key, "instance_uuid": instance_uuid})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ session.add(meta_ref)
- return metadata
+ return metadata
####################
@@ -4280,8 +3922,13 @@ def agent_build_get_by_triple(context, hypervisor, os, architecture,
@require_admin_context
-def agent_build_get_all(context):
- return model_query(context, models.AgentBuild, read_deleted="no").\
+def agent_build_get_all(context, hypervisor=None):
+ if hypervisor:
+ return model_query(context, models.AgentBuild, read_deleted="no").\
+ filter_by(hypervisor=hypervisor).\
+ all()
+ else:
+ return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@@ -4289,12 +3936,12 @@ def agent_build_get_all(context):
def agent_build_destroy(context, agent_build_id):
session = get_session()
with session.begin():
- model_query(context, models.AgentBuild, session=session,
- read_deleted="yes").\
+ count = model_query(context, models.AgentBuild,
+ session=session, read_deleted="yes").\
filter_by(id=agent_build_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ soft_delete()
+ if count == 0:
+ raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
@@ -4305,7 +3952,8 @@ def agent_build_update(context, agent_build_id, values):
session=session, read_deleted="yes").\
filter_by(id=agent_build_id).\
first()
-
+ if not agent_build_ref:
+ raise exception.AgentBuildNotFound(id=agent_build_id)
agent_build_ref.update(values)
agent_build_ref.save(session=session)
@@ -4404,10 +4052,7 @@ def instance_type_extra_specs_delete(context, flavor_id, key):
_instance_type_extra_specs_get_query(
context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')},
- synchronize_session=False)
+ soft_delete(synchronize_session=False)
@require_context
@@ -4443,190 +4088,90 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
return specs
-##################
-
-
-@require_admin_context
-def volume_type_create(context, values):
- """Create a new instance type. In order to pass in extra specs,
- the values dict should contain a 'extra_specs' key/value pair:
-
- {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
-
- """
- session = get_session()
- with session.begin():
- try:
- volume_type_get_by_name(context, values['name'], session)
- raise exception.VolumeTypeExists(name=values['name'])
- except exception.VolumeTypeNotFoundByName:
- pass
- try:
- values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
- models.VolumeTypeExtraSpecs)
- volume_type_ref = models.VolumeTypes()
- volume_type_ref.update(values)
- volume_type_ref.save()
- except Exception, e:
- raise exception.DBError(e)
- return volume_type_ref
-
-
-@require_context
-def volume_type_get_all(context, inactive=False, filters=None):
- """
- Returns a dict describing all volume_types with name as key.
- """
- filters = filters or {}
-
- read_deleted = "yes" if inactive else "no"
- rows = model_query(context, models.VolumeTypes,
- read_deleted=read_deleted).\
- options(joinedload('extra_specs')).\
- order_by("name").\
- all()
-
- # TODO(sirp): this patern of converting rows to a result with extra_specs
- # is repeated quite a bit, might be worth creating a method for it
- result = {}
- for row in rows:
- result[row['name']] = _dict_with_extra_specs(row)
-
- return result
+####################
@require_context
-def volume_type_get(context, id, session=None):
- """Returns a dict describing specific volume_type"""
- result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(id=id).\
- first()
-
- if not result:
- raise exception.VolumeTypeNotFound(volume_type_id=id)
-
- return _dict_with_extra_specs(result)
+def vol_get_usage_by_time(context, begin):
+ """Return volume usage records that have been updated after a specified time."""
+ return model_query(context, models.VolumeUsage, read_deleted="yes").\
+ filter(or_(models.VolumeUsage.tot_last_refreshed == None,
+ models.VolumeUsage.tot_last_refreshed > begin,
+ models.VolumeUsage.curr_last_refreshed == None,
+ models.VolumeUsage.curr_last_refreshed > begin,
+ )).\
+ all()
@require_context
-def volume_type_get_by_name(context, name, session=None):
- """Returns a dict describing specific volume_type"""
- result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(name=name).\
- first()
-
- if not result:
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- else:
- return _dict_with_extra_specs(result)
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+ instance_id, last_refreshed=None, update_totals=False,
+ session=None):
+ if not session:
+ session = get_session()
+ if last_refreshed is None:
+ last_refreshed = timeutils.utcnow()
-@require_admin_context
-def volume_type_destroy(context, name):
- session = get_session()
with session.begin():
- volume_type_ref = volume_type_get_by_name(context, name,
- session=session)
- volume_type_id = volume_type_ref['id']
- session.query(models.VolumeTypes).\
- filter_by(id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- session.query(models.VolumeTypeExtraSpecs).\
- filter_by(volume_type_id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def volume_get_active_by_window(context, begin, end=None,
- project_id=None):
- """Return volumes that were active during window."""
- session = get_session()
- query = session.query(models.Volume)
-
- query = query.filter(or_(models.Volume.deleted_at == None,
- models.Volume.deleted_at > begin))
- if end:
- query = query.filter(models.Volume.created_at < end)
- if project_id:
- query = query.filter_by(project_id=project_id)
-
- return query.all()
-
-
-####################
-
-
-def _volume_type_extra_specs_query(context, volume_type_id, session=None):
- return model_query(context, models.VolumeTypeExtraSpecs, session=session,
- read_deleted="no").\
- filter_by(volume_type_id=volume_type_id)
-
-
-@require_context
-def volume_type_extra_specs_get(context, volume_type_id):
- rows = _volume_type_extra_specs_query(context, volume_type_id).\
- all()
-
- result = {}
- for row in rows:
- result[row['key']] = row['value']
-
- return result
-
+ values = {}
+ # NOTE(dricco): We will be mostly updating current usage records vs
+ # updating total or creating records. Optimize accordingly.
+ if not update_totals:
+ values = {'curr_last_refreshed': last_refreshed,
+ 'curr_reads': rd_req,
+ 'curr_read_bytes': rd_bytes,
+ 'curr_writes': wr_req,
+ 'curr_write_bytes': wr_bytes,
+ 'instance_id': instance_id}
+ else:
+ values = {'tot_last_refreshed': last_refreshed,
+ 'tot_reads': models.VolumeUsage.tot_reads + rd_req,
+ 'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
+ rd_bytes,
+ 'tot_writes': models.VolumeUsage.tot_writes + wr_req,
+ 'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
+ wr_bytes,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0,
+ 'instance_id': instance_id}
+
+ rows = model_query(context, models.VolumeUsage,
+ session=session, read_deleted="yes").\
+ filter_by(volume_id=id).\
+ update(values, synchronize_session=False)
-@require_context
-def volume_type_extra_specs_delete(context, volume_type_id, key):
- _volume_type_extra_specs_query(context, volume_type_id).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ if rows:
+ return
+ vol_usage = models.VolumeUsage()
+ vol_usage.tot_last_refreshed = timeutils.utcnow()
+ vol_usage.curr_last_refreshed = timeutils.utcnow()
+ vol_usage.volume_id = id
-@require_context
-def volume_type_extra_specs_get_item(context, volume_type_id, key,
- session=None):
- result = _volume_type_extra_specs_query(
- context, volume_type_id, session=session).\
- filter_by(key=key).\
- first()
+ if not update_totals:
+ vol_usage.curr_reads = rd_req
+ vol_usage.curr_read_bytes = rd_bytes
+ vol_usage.curr_writes = wr_req
+ vol_usage.curr_write_bytes = wr_bytes
+ else:
+ vol_usage.tot_reads = rd_req
+ vol_usage.tot_read_bytes = rd_bytes
+ vol_usage.tot_writes = wr_req
+ vol_usage.tot_write_bytes = wr_bytes
- if not result:
- raise exception.VolumeTypeExtraSpecsNotFound(
- extra_specs_key=key, volume_type_id=volume_type_id)
+ vol_usage.save(session=session)
- return result
-
-
-@require_context
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
- specs):
- session = get_session()
- spec_ref = None
- for key, value in specs.iteritems():
- try:
- spec_ref = volume_type_extra_specs_get_item(
- context, volume_type_id, key, session)
- except exception.VolumeTypeExtraSpecsNotFound:
- spec_ref = models.VolumeTypeExtraSpecs()
- spec_ref.update({"key": key, "value": value,
- "volume_type_id": volume_type_id,
- "deleted": False})
- spec_ref.save(session=session)
- return specs
+ return
####################
def s3_image_get(context, image_id):
- """Find local s3 image represented by the provided id"""
+ """Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
@@ -4638,7 +4183,7 @@ def s3_image_get(context, image_id):
def s3_image_get_by_uuid(context, image_uuid):
- """Find local s3 image represented by the provided uuid"""
+ """Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
@@ -4650,7 +4195,7 @@ def s3_image_get_by_uuid(context, image_uuid):
def s3_image_create(context, image_uuid):
- """Create local s3 image represented by provided uuid"""
+ """Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
@@ -4664,243 +4209,54 @@ def s3_image_create(context, image_uuid):
####################
-@require_admin_context
-def sm_backend_conf_create(context, values):
- session = get_session()
- with session.begin():
- config_params = values['config_params']
- backend_conf = model_query(context, models.SMBackendConf,
- session=session,
- read_deleted="yes").\
- filter_by(config_params=config_params).\
- first()
-
- if backend_conf:
- raise exception.Duplicate(_('Backend exists'))
- else:
- backend_conf = models.SMBackendConf()
- backend_conf.update(values)
- backend_conf.save(session=session)
- return backend_conf
-
-
-@require_admin_context
-def sm_backend_conf_update(context, sm_backend_id, values):
- session = get_session()
- with session.begin():
- backend_conf = model_query(context, models.SMBackendConf,
- session=session,
- read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
-
- if not backend_conf:
- raise exception.NotFound(
- _("No backend config with id %(sm_backend_id)s") % locals())
-
- backend_conf.update(values)
- backend_conf.save(session=session)
- return backend_conf
-
-
-@require_admin_context
-def sm_backend_conf_delete(context, sm_backend_id):
- # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
- # `purge` actually deleting the record?
- session = get_session()
- with session.begin():
- model_query(context, models.SMBackendConf, session=session,
- read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- delete()
-
-
-@require_admin_context
-def sm_backend_conf_get(context, sm_backend_id):
- result = model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
-
- if not result:
- raise exception.NotFound(_("No backend config with id "
- "%(sm_backend_id)s") % locals())
-
- return result
-
-
-@require_admin_context
-def sm_backend_conf_get_by_sr(context, sr_uuid):
- result = model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(sr_uuid=sr_uuid).\
- first()
- if not result:
- raise exception.NotFound(_("No backend config with sr uuid "
- "%(sr_uuid)s") % locals())
- return result
-
-
-@require_admin_context
-def sm_backend_conf_get_all(context):
- return model_query(context, models.SMBackendConf, read_deleted="yes").\
- all()
-
-
-####################
-
-
-def _sm_flavor_get_query(context, sm_flavor_id, session=None):
- return model_query(context, models.SMFlavors, session=session,
- read_deleted="yes").\
- filter_by(id=sm_flavor_id)
-
-
-@require_admin_context
-def sm_flavor_create(context, values):
- session = get_session()
- with session.begin():
- sm_flavor = model_query(context, models.SMFlavors,
- session=session,
- read_deleted="yes").\
- filter_by(label=values['label']).\
- first()
- if not sm_flavor:
- sm_flavor = models.SMFlavors()
- sm_flavor.update(values)
- sm_flavor.save(session=session)
- else:
- raise exception.Duplicate(_('Flavor exists'))
- return sm_flavor
-
-
-@require_admin_context
-def sm_flavor_update(context, sm_flavor_id, values):
- session = get_session()
- with session.begin():
- sm_flavor = model_query(context, models.SMFlavors,
- session=session,
- read_deleted="yes").\
- filter_by(id=sm_flavor_id).\
- first()
- if not sm_flavor:
- raise exception.NotFound(
- _('%(sm_flavor_id) flavor not found') % locals())
- sm_flavor.update(values)
- sm_flavor.save(session=session)
- return sm_flavor
-
-
-@require_admin_context
-def sm_flavor_delete(context, sm_flavor_id):
- session = get_session()
- with session.begin():
- _sm_flavor_get_query(context, sm_flavor_id).delete()
-
-
-@require_admin_context
-def sm_flavor_get(context, sm_flavor_id):
- result = _sm_flavor_get_query(context, sm_flavor_id).first()
-
- if not result:
- raise exception.NotFound(
- _("No sm_flavor called %(sm_flavor_id)s") % locals())
-
- return result
-
-
-@require_admin_context
-def sm_flavor_get_all(context):
- return model_query(context, models.SMFlavors, read_deleted="yes").all()
-
-
-@require_admin_context
-def sm_flavor_get_by_label(context, sm_flavor_label):
- result = model_query(context, models.SMFlavors,
- read_deleted="yes").\
- filter_by(label=sm_flavor_label).first()
- if not result:
- raise exception.NotFound(
- _("No sm_flavor called %(sm_flavor_label)s") % locals())
- return result
-
-
-###############################
-
-
-def _sm_volume_get_query(context, volume_id, session=None):
- return model_query(context, models.SMVolume, session=session,
- read_deleted="yes").\
- filter_by(id=volume_id)
-
-
-def sm_volume_create(context, values):
- sm_volume = models.SMVolume()
- sm_volume.update(values)
- sm_volume.save()
- return sm_volume
-
-
-def sm_volume_update(context, volume_id, values):
- sm_volume = sm_volume_get(context, volume_id)
- sm_volume.update(values)
- sm_volume.save()
- return sm_volume
-
-
-def sm_volume_delete(context, volume_id):
- session = get_session()
- with session.begin():
- _sm_volume_get_query(context, volume_id, session=session).delete()
-
-
-def sm_volume_get(context, volume_id):
- result = _sm_volume_get_query(context, volume_id).first()
-
- if not result:
- raise exception.NotFound(
- _("No sm_volume with id %(volume_id)s") % locals())
-
- return result
-
-
-def sm_volume_get_all(context):
- return model_query(context, models.SMVolume, read_deleted="yes").all()
+def _aggregate_get_query(context, model_class, id_field=None, id=None,
+ session=None, read_deleted=None):
+ columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
+ query = model_query(context, model_class, session=session,
+ read_deleted=read_deleted)
-################
+ for c in columns_to_join.get(model_class, []):
+ query = query.options(joinedload(c))
+ if id and id_field:
+ query = query.filter(id_field == id)
-def _aggregate_get_query(context, model_class, id_field, id,
- session=None, read_deleted=None):
- return model_query(context, model_class, session=session,
- read_deleted=read_deleted).filter(id_field == id)
+ return query
@require_admin_context
def aggregate_create(context, values, metadata=None):
session = get_session()
- aggregate = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.name,
- values['name'],
- session=session,
- read_deleted='no').first()
+ query = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.name,
+ values['name'],
+ session=session,
+ read_deleted='no')
+ aggregate = query.options(joinedload('_metadata')).first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
+ # We don't want these to be lazy loaded later. We know there is
+ # nothing here since we just created this aggregate.
+ aggregate._hosts = []
+ aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
- return aggregate
+ return aggregate_get(context, aggregate.id)
@require_admin_context
def aggregate_get(context, aggregate_id):
- aggregate = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.id,
- aggregate_id).first()
+ query = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.id,
+ aggregate_id)
+ aggregate = query.options(joinedload('_metadata')).first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@@ -4910,8 +4266,8 @@ def aggregate_get(context, aggregate_id):
@require_admin_context
def aggregate_get_by_host(context, host, key=None):
- query = model_query(context, models.Aggregate).join(
- "_hosts").filter(models.AggregateHost.host == host)
+ query = _aggregate_get_query(context, models.Aggregate,
+ models.AggregateHost.host, host)
if key:
query = query.join("_metadata").filter(
@@ -4932,18 +4288,38 @@ def aggregate_metadata_get_by_host(context, host, key=None):
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
- return metadata
+ return dict(metadata)
+
+
+@require_admin_context
+def aggregate_host_get_by_metadata_key(context, key):
+ query = model_query(context, models.Aggregate).join(
+ "_metadata").filter(models.AggregateMetadata.key == key)
+ rows = query.all()
+ metadata = collections.defaultdict(set)
+ for agg in rows:
+ for agghost in agg._hosts:
+ metadata[agghost.host].add(agg._metadata[0]['value'])
+ return dict(metadata)
@require_admin_context
def aggregate_update(context, aggregate_id, values):
session = get_session()
- aggregate = _aggregate_get_query(context,
+ aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
- session=session).first()
+ session=session).
+ options(joinedload('_metadata')).first())
+
if aggregate:
+ if "availability_zone" in values:
+ az = values.pop('availability_zone')
+ if 'metadata' not in values:
+ values['metadata'] = {'availability_zone': az}
+ else:
+ values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
@@ -4954,36 +4330,44 @@ def aggregate_update(context, aggregate_id, values):
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
- return aggregate
+ return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@require_admin_context
def aggregate_delete(context, aggregate_id):
- query = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.id,
- aggregate_id)
- if query.first():
- query.update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- else:
- raise exception.AggregateNotFound(aggregate_id=aggregate_id)
+ session = get_session()
+ with session.begin():
+ count = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.id,
+ aggregate_id,
+ session=session).\
+ soft_delete()
+ if count == 0:
+ raise exception.AggregateNotFound(aggregate_id=aggregate_id)
- #Delete Metadata
- model_query(context,
- models.AggregateMetadata).\
- filter_by(aggregate_id=aggregate_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ #Delete Metadata
+ model_query(context,
+ models.AggregateMetadata, session=session).\
+ filter_by(aggregate_id=aggregate_id).\
+ soft_delete()
@require_admin_context
def aggregate_get_all(context):
- return model_query(context, models.Aggregate).all()
+ return _aggregate_get_query(context, models.Aggregate).all()
+
+
+@require_admin_context
+def aggregate_metadata_get_query(context, aggregate_id, session=None,
+ read_deleted="yes"):
+ return model_query(context,
+ models.AggregateMetadata,
+ read_deleted=read_deleted,
+ session=session).\
+ filter_by(aggregate_id=aggregate_id)
@require_admin_context
@@ -4999,16 +4383,13 @@ def aggregate_metadata_get(context, aggregate_id):
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
- query = _aggregate_get_query(context,
+ count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
- filter_by(key=key)
- if query.first():
- query.update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- else:
+ filter_by(key=key).\
+ soft_delete()
+ if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@@ -5034,33 +4415,31 @@ def aggregate_metadata_get_item(context, aggregate_id, key, session=None):
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
session = get_session()
-
- if set_delete:
- original_metadata = aggregate_metadata_get(context, aggregate_id)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = aggregate_metadata_get_item(context, aggregate_id,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- for meta_key, meta_value in metadata.iteritems():
- item = {"value": meta_value}
- try:
- meta_ref = aggregate_metadata_get_item(context, aggregate_id,
- meta_key, session)
- if meta_ref.deleted:
- item.update({'deleted': False, 'deleted_at': None})
- except exception.AggregateMetadataNotFound:
+ all_keys = metadata.keys()
+ with session.begin():
+ query = aggregate_metadata_get_query(context, aggregate_id,
+ session=session)
+ if set_delete:
+ query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=False)
+
+ query = query.filter(models.AggregateMetadata.key.in_(all_keys))
+ already_existing_keys = []
+ for meta_ref in query.all():
+ key = meta_ref.key
+ meta_ref.update({"value": metadata[key],
+ "deleted": False,
+ "deleted_at": None})
+ already_existing_keys.append(key)
+
+ for key in set(all_keys) - set(already_existing_keys):
meta_ref = models.AggregateMetadata()
- item.update({"key": meta_key, "aggregate_id": aggregate_id})
+ meta_ref.update({"key": key,
+ "value": metadata[key],
+ "aggregate_id": aggregate_id})
+ session.add(meta_ref)
- meta_ref.update(item)
- meta_ref.save(session=session)
-
- return metadata
+ return metadata
@require_admin_context
@@ -5076,15 +4455,13 @@ def aggregate_host_get_all(context, aggregate_id):
@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
- query = _aggregate_get_query(context,
+ count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
- aggregate_id).filter_by(host=host)
- if query.first():
- query.update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- else:
+ aggregate_id).\
+ filter_by(host=host).\
+ soft_delete()
+ if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@@ -5130,7 +4507,7 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
- order_by(desc("created_at")).\
+ order_by(desc("created_at"), desc("id")).\
all()
output = {}
@@ -5147,9 +4524,128 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
##################
+def action_start(context, values):
+ action_ref = models.InstanceAction()
+ action_ref.update(values)
+ action_ref.save()
+ return action_ref
+
+
+def action_finish(context, values):
+ session = get_session()
+ with session.begin():
+ action_ref = model_query(context, models.InstanceAction,
+ session=session).\
+ filter_by(instance_uuid=values['instance_uuid']).\
+ filter_by(request_id=values['request_id']).\
+ first()
+
+ if not action_ref:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ action_ref.update(values)
+ return action_ref
+
+
+def actions_get(context, instance_uuid):
+ """Get all instance actions for the provided uuid."""
+ actions = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ order_by(desc("created_at")).\
+ all()
+ return actions
+
+
+def action_get_by_id(context, instance_uuid, action_id):
+ """Get the action by id and given instance."""
+ action = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(id=action_id).\
+ first()
+
+ return action
+
+
+def _action_get_by_request_id(context, instance_uuid, request_id,
+ session=None):
+ result = model_query(context, models.InstanceAction, session=session).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(request_id=request_id).\
+ first()
+ return result
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action."""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ values['action_id'] = action['id']
+
+ event_ref = models.InstanceActionEvent()
+ event_ref.update(values)
+ event_ref.save(session=session)
+ return event_ref
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action."""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ event_ref = model_query(context, models.InstanceActionEvent,
+ session=session).\
+ filter_by(action_id=action['id']).\
+ filter_by(event=values['event']).\
+ first()
+
+ if not event_ref:
+ raise exception.InstanceActionEventNotFound(action_id=action['id'],
+ event=values['event'])
+ event_ref.update(values)
+ return event_ref
+
+
+def action_events_get(context, action_id):
+ events = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ order_by(desc("created_at")).\
+ all()
+
+ return events
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ event = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ filter_by(id=event_id).\
+ first()
+
+ return event
+
+
+##################
+
+
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
- """Create ec2 compatable instance by provided uuid"""
+ """Create ec2 compatable instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py b/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
deleted file mode 100644
index d08afd16e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import MetaData, Integer, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # New table
- quota_classes = Table('quota_classes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('class_name',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('hard_limit', Integer(), nullable=True),
- )
-
- try:
- quota_classes.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_classes))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_classes = Table('quota_classes', meta, autoload=True)
- try:
- quota_classes.drop()
- except Exception:
- LOG.error(_("quota_classes table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py b/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py
deleted file mode 100644
index d9308121d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- """Map quotas hard_limit from NULL to -1"""
- _migrate_unlimited(migrate_engine, None, -1)
-
-
-def downgrade(migrate_engine):
- """Map quotas hard_limit from -1 to NULL"""
- _migrate_unlimited(migrate_engine, -1, None)
-
-
-def _migrate_unlimited(migrate_engine, old_limit, new_limit):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- def _migrate(table_name):
- table = sqlalchemy.Table(table_name, meta, autoload=True)
- table.update().\
- where(table.c.hard_limit == old_limit).\
- values(hard_limit=new_limit).execute()
-
- _migrate('quotas')
- _migrate('quota_classes')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py b/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
deleted file mode 100644
index da985b956..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
-
- tables = ["agent_builds", "aggregate_hosts", "aggregate_metadata",
- "aggregates", "block_device_mapping", "bw_usage_cache",
- "dns_domains", "instance_faults", "instance_type_extra_specs",
- "provider_fw_rules", "quota_classes", "s3_images",
- "sm_backend_config", "sm_flavors", "sm_volume",
- "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs", "volume_types"]
-
- meta = MetaData()
- meta.bind = migrate_engine
- if migrate_engine.name == "mysql":
- d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';")
- for row in d.fetchall():
- table_name = row[0]
- if table_name in tables:
- migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" %
- table_name)
-
-
-def downgrade(migrate_engine):
- pass
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
deleted file mode 100644
index ce07905c8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, BigInteger
-from sqlalchemy import MetaData, Integer, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # add column:
- bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
- uuid = Column('uuid', String(36))
-
- # clear the cache to get rid of entries with no uuid
- migrate_engine.execute(bw_usage_cache.delete())
-
- bw_usage_cache.create_column(uuid)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # drop column:
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('mac', String(255)),
- Column('uuid', String(36)),
- Column('start_period', DateTime(timezone=False), nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- extend_existing=True)
-
- bw_usage_cache.drop_column('uuid')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
deleted file mode 100644
index 73d8b6968..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == block_device_mapping.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(block_device_mapping.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[block_device_mapping.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- block_device_mapping.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == block_device_mapping.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- block_device_mapping.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql
deleted file mode 100644
index 3699ce9ab..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql
+++ /dev/null
@@ -1,97 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- NULL,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE block_device_mapping_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql
deleted file mode 100644
index d75d2ffa2..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql
+++ /dev/null
@@ -1,97 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- NULL
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE block_device_mapping_backup.instance_id = instances.id
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py b/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py
deleted file mode 100644
index d878e250b..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-from nova import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Build mapping tables for our volume uuid migration.
-
- These mapping tables serve two purposes:
- 1. Provide a method for downgrade after UUID conversion
- 2. Provide a uuid to associate with existing volumes and snapshots
- when we do the actual datatype migration from int to uuid
-
- """
- meta = MetaData()
- meta.bind = migrate_engine
-
- volume_id_mappings = Table('volume_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- volume_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'volume_id_mappings'")
- meta.drop_all(tables=[volume_id_mappings])
- raise
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- snapshot_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'snapshot_id_mappings'")
- meta.drop_all(tables=[snapshot_id_mappings])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
- "Engine=InnoDB")
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- old_id = v['id']
- new_id = utils.gen_uuid()
- row = volume_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- old_id = s['id']
- new_id = utils.gen_uuid()
- row = snapshot_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- volume_id_mappings.drop()
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
- snapshot_id_mappings.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py b/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
deleted file mode 100644
index 4be63b322..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Integer
-from sqlalchemy import MetaData, String, Table
-
-from migrate import ForeignKeyConstraint
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(String(36), primary_key=True)
- volumes.c.snapshot_id.alter(String(36))
- volume_metadata.c.volume_id.alter(String(36), nullable=False)
- snapshots.c.id.alter(String(36), primary_key=True)
- snapshots.c.volume_id.alter(String(36))
- sm_volume.c.id.alter(String(36))
- block_device_mapping.c.volume_id.alter(String(36))
- block_device_mapping.c.snapshot_id.alter(String(36))
- iscsi_targets.c.volume_id.alter(String(36), nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
- # NOTE(jdg) We're intentionally leaving off FK's on BDM
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- if dialect.startswith('sqlite'):
- return
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(Integer, primary_key=True, autoincrement=True)
- volumes.c.snapshot_id.alter(Integer)
- volume_metadata.c.volume_id.alter(Integer, nullable=False)
- snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True)
- snapshots.c.volume_id.alter(Integer)
- sm_volume.c.id.alter(Integer)
- block_device_mapping.c.volume_id.alter(Integer)
- block_device_mapping.c.snapshot_id.alter(Integer)
- iscsi_targets.c.volume_id.alter(Integer, nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- # NOTE(jdg) Put the BDM foreign keys back in place
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
deleted file mode 100644
index 7d89da247..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
+++ /dev/null
@@ -1,226 +0,0 @@
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id INTEGER,
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change volume_id and sm_volume_table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql
deleted file mode 100644
index 53fbc69f6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql
+++ /dev/null
@@ -1,226 +0,0 @@
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id VARCHAR(36),
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id VARCHAR(36),
- volume_id VARCHAR(36),
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change volume_id and sm_volume_table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py
deleted file mode 100644
index dadf15d30..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from migrate import ForeignKeyConstraint, NotSupportedError
-from sqlalchemy import MetaData, select, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- fkey_columns = [
- iscsi_targets.c.volume_id,
- volume_metadata.c.volume_id,
- sm_volumes.c.id,
- ]
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- LOG.info('Dropping foreign key %s' % fkey_name)
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id],
- name=fkey_name)
- try:
- fkey.drop()
- except NotSupportedError:
- # NOTE(sirp): sqlite doesn't support ALTER TABLE DROP
- # CONSTRAINT and sqlalchemy-migrate doesn't yet have a
- # work-around using temp tables.
- pass
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.uuid],
- volume_mappings.c.id == v['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.uuid],
- snapshot_mappings.c.id == s['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id])
- fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- fkey_columns = [
- iscsi_targets.c.volume_id,
- volume_metadata.c.volume_id,
- sm_volumes.c.id,
- ]
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- LOG.info('Dropping foreign key %s' % fkey_name)
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id],
- name=fkey_name)
- try:
- fkey.drop()
- except NotSupportedError:
- # NOTE(sirp): sqlite doesn't support ALTER TABLE DROP
- # CONSTRAINT and sqlalchemy-migrate doesn't yet have a
- # work-around using temp tables.
- pass
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.id],
- volume_mappings.c.uuid == v['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.id],
- snapshot_mappings.c.uuid == s['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id])
- fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py
deleted file mode 100644
index 85856ed6a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- instance_system_metadata = Table('instance_system_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_uuid',
- String(36),
- ForeignKey('instances.uuid'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=True,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('value',
- String(length=255, convert_unicode=True,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- mysql_engine='InnoDB')
-
- try:
- instance_system_metadata.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(instance_system_metadata))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- instance_system_metadata = Table(
- 'instance_system_metadata', meta, autoload=True)
- instance_system_metadata.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py b/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py
deleted file mode 100644
index 0200861b2..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String, Table, Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_actions = Table('instance_actions', meta, autoload=True)
- instance_actions.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True,
- autoload_with=migrate_engine)
-
- instance_actions = Table('instance_actions', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id')),
- Column('action',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('error',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
- instance_actions.create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py b/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py
deleted file mode 100644
index 5b1d9b490..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(dprince): Need to rename the leftover zones stuff and quota_new
- # stuff from Essex for PostgreSQL.
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE zones_id_seq RENAME TO cells_id_seq;
- ALTER TABLE ONLY cells DROP CONSTRAINT zones_pkey;
- ALTER TABLE ONLY cells ADD CONSTRAINT cells_pkey
- PRIMARY KEY (id);
-
- ALTER TABLE quotas_new_id_seq RENAME TO quotas_id_seq;
- ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_new_pkey;
- ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_pkey
- PRIMARY KEY (id);"""
- migrate_engine.execute(sql)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE cells_id_seq RENAME TO zones_id_seq;
- ALTER TABLE ONLY cells DROP CONSTRAINT cells_pkey;
- ALTER TABLE ONLY cells ADD CONSTRAINT zones_pkey
- PRIMARY KEY (id);
-
- ALTER TABLE quotas_id_seq RENAME TO quotas_new_id_seq;
- ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_pkey;
- ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_new_pkey
- PRIMARY KEY (id);"""
- migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py
deleted file mode 100644
index 08501177d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# Copyright 2012 SolidFire Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Integer, String, Table
-from sqlalchemy import select, Column
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_uuid_column = Column('instance_uuid', String(36))
-
- instance_uuid_column.create(volumes)
- try:
- volumes.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == volumes.c.instance_id)
- ).execute()
- except Exception:
- instance_uuid_column.drop()
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).drop()
-
- except Exception:
- LOG.error(_("foreign key could not be dropped"))
- raise
-
- volumes.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_id_column = Column('instance_id', Integer)
-
- instance_id_column.create(volumes)
- try:
- volumes.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == volumes.c.instance_uuid)
- ).execute()
- except Exception:
- instance_id_column.drop()
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).create()
-
- except Exception:
- LOG.error(_("foreign key could not be created"))
- raise
-
- volumes.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql
deleted file mode 100644
index 7c13455e4..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql
+++ /dev/null
@@ -1,133 +0,0 @@
-BEGIN TRANSACTION;
- -- change instance_id volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- NULL,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_id =
- (SELECT id
- FROM instances
- WHERE volumes_backup.instance_uuid = instances.uuid
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY (instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql
deleted file mode 100644
index 130e11030..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql
+++ /dev/null
@@ -1,132 +0,0 @@
-BEGIN TRANSACTION;
- -- change instance_id volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- NULL,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_uuid =
- (SELECT uuid
- FROM instances
- WHERE volumes_backup.instance_id = instances.id
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py b/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py
deleted file mode 100644
index 0e51c644f..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(dprince): The old dns_domains table is in the 'latin1'
- # charset and had its primary key length set to 512.
- # This is too long to be a valid pkey in the 'utf8' table charset
- # and is the root cause of errors like:
- #
- # 1) Dumping a database with mysqldump and trying to import it fails
- # because this table is latin1 but fkeys to utf8 tables (projects).
- #
- # 2) Trying to alter the old dns_domains table fails with errors like:
- # mysql> ALTER TABLE dns_domains DROP PRIMARY KEY;
- # ERROR 1025 (HY000): Error on rename of './nova/#sql-6cf_855'....
- #
- # In short this table is just in a bad state. So... lets create a new one
- # with a shorter 'domain' column which is valid for the utf8 charset.
- # https://bugs.launchpad.net/nova/+bug/993663
-
- #rename old table
- dns_domains_old = Table('dns_domains', meta, autoload=True)
- dns_domains_old.rename(name='dns_domains_old')
-
- # NOTE(dprince): manually remove pkey/fkey for postgres
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_pkey;
- ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_project_id_fkey;"""
- migrate_engine.execute(sql)
-
- #Bind new metadata to avoid issues after the rename
- meta = MetaData()
- meta.bind = migrate_engine
- projects = Table('projects', meta, autoload=True) # Required for fkey
-
- dns_domains_new = Table('dns_domains', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('domain', String(length=255), nullable=False, primary_key=True),
- Column('scope', String(length=255)),
- Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255), ForeignKey('projects.id')),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- dns_domains_new.create()
-
- dns_domains_old = Table('dns_domains_old', meta, autoload=True)
- record_list = list(dns_domains_old.select().execute())
- for rec in record_list:
- row = dns_domains_new.insert()
- row.execute({'created_at': rec['created_at'],
- 'updated_at': rec['updated_at'],
- 'deleted_at': rec['deleted_at'],
- 'deleted': rec['deleted'],
- 'domain': rec['domain'],
- 'scope': rec['scope'],
- 'availability_zone': rec['availability_zone'],
- 'project_id': rec['project_id'],
- })
-
- dns_domains_old.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dns_domains_old = Table('dns_domains', meta, autoload=True)
- dns_domains_old.rename(name='dns_domains_old')
-
- # NOTE(dprince): manually remove pkey/fkey for postgres
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_pkey;
- ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_project_id_fkey;"""
- migrate_engine.execute(sql)
-
- #Bind new metadata to avoid issues after the rename
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains_new = Table('dns_domains', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('domain', String(length=512), primary_key=True, nullable=False),
- Column('scope', String(length=255)),
- Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
- dns_domains_new.create()
-
- dns_domains_old = Table('dns_domains_old', meta, autoload=True)
- record_list = list(dns_domains_old.select().execute())
- for rec in record_list:
- row = dns_domains_new.insert()
- row.execute({'created_at': rec['created_at'],
- 'updated_at': rec['updated_at'],
- 'deleted_at': rec['deleted_at'],
- 'deleted': rec['deleted'],
- 'domain': rec['domain'],
- 'scope': rec['scope'],
- 'availability_zone': rec['availability_zone'],
- 'project_id': rec['project_id'],
- })
-
- dns_domains_old.drop()
-
- # NOTE(dprince): We can't easily add the MySQL Fkey on the downgrade
- # because projects is 'utf8' where dns_domains is 'latin1'.
- if migrate_engine.name != "mysql":
- projects = Table('projects', meta, autoload=True)
- fkey = ForeignKeyConstraint(columns=[dns_domains_new.c.project_id],
- refcolumns=[projects.c.id])
- fkey.create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py b/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py
deleted file mode 100644
index 82d66938c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # New tables
- quota_usages = Table('quota_usages', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('in_use', Integer(), nullable=False),
- Column('reserved', Integer(), nullable=False),
- Column('until_refresh', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- quota_usages.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_usages))
- raise
-
- reservations = Table('reservations', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('uuid',
- String(length=36, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), nullable=False),
- Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
- nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('delta', Integer(), nullable=False),
- Column('expire', DateTime(timezone=False)),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- reservations.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(reservations))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- try:
- quota_usages.drop()
- except Exception:
- LOG.error(_("quota_usages table not dropped"))
- raise
-
- reservations = Table('reservations', meta, autoload=True)
- try:
- reservations.drop()
- except Exception:
- LOG.error(_("reservations table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py b/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py
deleted file mode 100644
index 680b27df7..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright (c) 2012 Canonical Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column, Table, MetaData, String, DateTime
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- attach_datetime = Column('attachtime_datetime', DateTime(timezone=False))
- attach_datetime.create(volumes)
-
- old_attachtime = volumes.c.attach_time
-
- try:
- volumes_list = list(volumes.select().execute())
- for v in volumes_list:
- attach_time = select([volumes.c.attach_time],
- volumes.c.id == v['id']).execute().fetchone()[0]
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(attachtime_datetime=attach_time).execute()
- except Exception:
- attach_datetime.drop()
- raise
-
- old_attachtime.alter(name='attach_time_old')
- attach_datetime.alter(name='attach_time')
- old_attachtime.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- attach_string = Column('attachtime_string', String(255))
- attach_string.create(volumes)
-
- old_attachtime = volumes.c.attach_time
-
- try:
- volumes_list = list(volumes.select().execute())
- for v in volumes_list:
- attach_time = select([volumes.c.attach_time],
- volumes.c.id == v['id']).execute().fetchone()[0]
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(attachtime_string=attach_time).execute()
- except Exception:
- attach_string.drop()
- raise
-
- old_attachtime.alter(name='attach_time_old')
- attach_string.alter(name='attach_time')
- old_attachtime.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
deleted file mode 100644
index e5c2a275d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_metadata = Table('instance_metadata', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(instance_metadata)
-
- try:
- instance_metadata.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == instance_metadata.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(instance_metadata.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_metadata.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_metadata.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_metadata = Table('instance_metadata', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(instance_metadata)
-
- try:
- instance_metadata.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == instance_metadata.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- instance_metadata.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
deleted file mode 100644
index 97b628c6e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
+++ /dev/null
@@ -1,64 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_metadata_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- NULL,
- instance_uuid,
- key,
- value
- FROM instance_metadata;
-
- UPDATE instance_metadata_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE instance_metadata_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE instance_metadata;
-
- CREATE TABLE instance_metadata (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX instance_metadata_instance_id_idx ON instance_metadata(instance_id);
-
- INSERT INTO instance_metadata
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- key,
- value
- FROM instance_metadata_backup;
-
- DROP TABLE instance_metadata_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
deleted file mode 100644
index 0d1e1ca8b..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
+++ /dev/null
@@ -1,64 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_metadata_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- NULL,
- key,
- value
- FROM instance_metadata;
-
- UPDATE instance_metadata_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE instance_metadata_backup.instance_id = instances.id
- );
-
- DROP TABLE instance_metadata;
-
- CREATE TABLE instance_metadata (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX instance_metadata_instance_uuid_idx ON instance_metadata(instance_uuid);
-
- INSERT INTO instance_metadata
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- key,
- value
- FROM instance_metadata_backup;
-
- DROP TABLE instance_metadata_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
deleted file mode 100644
index 26b53bb7e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- sgia = Table('security_group_instance_association', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(sgia)
-
- try:
- sgia.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == sgia.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(sgia.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[sgia.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- sgia.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- sgia = Table('security_group_instance_association', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(sgia)
-
- try:
- sgia.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == sgia.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- sgia.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
deleted file mode 100644
index 08aaa241c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
+++ /dev/null
@@ -1,61 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE security_group_instance_association_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO security_group_instance_association_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- NULL,
- instance_uuid
- FROM security_group_instance_association;
-
- UPDATE security_group_instance_association_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE security_group_instance_association_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE security_group_instance_association;
-
- CREATE TABLE security_group_instance_association (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
- CREATE INDEX security_group_instance_association_instance_id_idx ON security_group_instance_association(instance_id);
-
- INSERT INTO security_group_instance_association
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_id
- FROM security_group_instance_association_backup;
-
- DROP TABLE security_group_instance_association_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
deleted file mode 100644
index d66c5ce37..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
+++ /dev/null
@@ -1,61 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE security_group_instance_association_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO security_group_instance_association_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_id,
- NULL
- FROM security_group_instance_association;
-
- UPDATE security_group_instance_association_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE security_group_instance_association_backup.instance_id = instances.id
- );
-
- DROP TABLE security_group_instance_association;
-
- CREATE TABLE security_group_instance_association (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
- CREATE INDEX security_group_instance_association_instance_uuid_idx ON security_group_instance_association(instance_uuid);
-
- INSERT INTO security_group_instance_association
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_uuid
- FROM security_group_instance_association_backup;
-
- DROP TABLE security_group_instance_association_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py
deleted file mode 100644
index 1cfa523c6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- consoles = Table('consoles', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(consoles)
-
- try:
- consoles.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == consoles.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(consoles.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[consoles.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- consoles.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- consoles = Table('consoles', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(consoles)
-
- try:
- consoles.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == consoles.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- consoles.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql
deleted file mode 100644
index 50f260549..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql
+++ /dev/null
@@ -1,72 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE consoles_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id)
- );
-
- INSERT INTO consoles_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- NULL,
- instance_uuid,
- password,
- port,
- pool_id
- FROM consoles;
-
- UPDATE consoles_backup
- SET instance_uuid=
- (SELECT id
- FROM instances
- WHERE consoles_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE consoles;
-
- CREATE TABLE consoles (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX consoles_pool_id ON consoles(pool_id);
-
- INSERT INTO consoles
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_id,
- password,
- port,
- pool_id
- FROM consoles_backup;
-
- DROP TABLE consoles_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql
deleted file mode 100644
index ef48162bc..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql
+++ /dev/null
@@ -1,72 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE consoles_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id)
- );
-
- INSERT INTO consoles_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_id,
- NULL,
- password,
- port,
- pool_id
- FROM consoles;
-
- UPDATE consoles_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE consoles_backup.instance_id = instances.id
- );
-
- DROP TABLE consoles;
-
- CREATE TABLE consoles (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX consoles_pool_id ON consoles(pool_id);
-
- INSERT INTO consoles
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_uuid,
- password,
- port,
- pool_id
- FROM consoles_backup;
-
- DROP TABLE consoles_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py b/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py
deleted file mode 100644
index 0eeac2587..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['block_device_mapping',
- 'consoles',
- 'volumes']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['block_device_mapping',
- 'consoles',
- 'volumes']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py b/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py
deleted file mode 100644
index 4bf6b0484..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(mikal): these weren't done in 103 because sqlite already has the
- # index.
- for table in ['instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
deleted file mode 100644
index c4c13e539..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
-
- # We need to remove the foreign key constraint or the column rename will
- # fail
- fkeys = list(instance_info_caches.c.instance_id.foreign_keys)
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_info_caches.c.instance_id],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_info_caches.c.instance_id.alter(name='instance_uuid')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
-
- # We need to remove the foreign key constraint or the column rename will
- # fail
- fkeys = list(instance_info_caches.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_info_caches.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_info_caches.c.instance_uuid.alter(name='instance_id')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql
deleted file mode 100644
index 563b1245a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql
+++ /dev/null
@@ -1,50 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_info_caches_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_id VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_info_caches_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_uuid as instance_id
- FROM instance_info_caches;
-
- DROP TABLE instance_info_caches;
-
- CREATE TABLE instance_info_caches (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_id VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- CREATE INDEX instance_info_caches_instance_id_idx ON instance_info_caches(instance_id);
-
- INSERT INTO instance_info_caches
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_id
- FROM instance_info_caches_backup;
-
- DROP TABLE instance_info_caches_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql
deleted file mode 100644
index 4e675749e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql
+++ /dev/null
@@ -1,50 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_info_caches_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_info_caches_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_id as instance_uuid
- FROM instance_info_caches;
-
- DROP TABLE instance_info_caches;
-
- CREATE TABLE instance_info_caches (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- CREATE INDEX instance_info_caches_instance_uuid_idx ON instance_info_caches(instance_uuid);
-
- INSERT INTO instance_info_caches
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_uuid
- FROM instance_info_caches_backup;
-
- DROP TABLE instance_info_caches_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py b/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
deleted file mode 100644
index 2c483007c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- for table in ['block_device_mapping',
- 'consoles',
- 'instance_info_caches',
- 'instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
-
- try:
- ForeignKeyConstraint(
- columns=[t.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- for table in ['block_device_mapping',
- 'consoles',
- 'instance_info_caches',
- 'instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
-
- try:
- ForeignKeyConstraint(
- columns=[t.c.instance_uuid],
- refcolumns=[instances.c.uuid]).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py b/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
deleted file mode 100644
index 250906c62..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 SINA Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # create new table
- instance_id_mappings = Table('instance_id_mappings', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36), index=True, nullable=False))
- try:
- instance_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'instance_id_mappings'")
- meta.drop_all(tables=[instance_id_mappings])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE instance_id_mappings "
- "Engine=InnoDB")
-
- instances = Table('instances', meta, autoload=True)
- instance_id_mappings = Table('instance_id_mappings', meta, autoload=True)
-
- instance_list = list(instances.select().execute())
- for instance in instance_list:
- instance_id = instance['id']
- uuid = instance['uuid']
- row = instance_id_mappings.insert()
- row.execute({'id': instance_id, 'uuid': uuid})
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_id_mappings = Table('instance_id_mappings', meta, autoload=True)
- instance_id_mappings.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py b/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
deleted file mode 100644
index d8593bd77..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 SINA Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # create new table
- task_log = Table('task_log', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('task_name', String(255), nullable=False),
- Column('state', String(255), nullable=False),
- Column('host', String(255), index=True, nullable=False),
- Column('period_beginning', String(255),
- index=True, nullable=False),
- Column('period_ending', String(255), index=True, nullable=False),
- Column('message', String(255), nullable=False),
- Column('task_items', Integer()),
- Column('errors', Integer()),
- )
- try:
- task_log.create()
- except Exception:
- meta.drop_all(tables=[task_log])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE task_log "
- "Engine=InnoDB")
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- task_log = Table('task_log', meta, autoload=True)
- task_log.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py b/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
deleted file mode 100644
index a2b0792d3..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains = Table('dns_domains', meta, autoload=True)
- projects = Table('projects', meta, autoload=True)
-
- fkeys = list(dns_domains.c.project_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[dns_domains.c.project_id],
- refcolumns=[projects.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains = Table('dns_domains', meta, autoload=True)
- projects = Table('projects', meta, autoload=True)
-
- kwargs = {
- 'columns': [dns_domains.c.project_id],
- 'refcolumns': [projects.c.id],
- }
-
- if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
- kwargs['name'] = 'dns_domains_ibfk_1'
-
- ForeignKeyConstraint(**kwargs).create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql
deleted file mode 100644
index ffb4d132e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE dns_domains_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains;
-
- DROP TABLE dns_domains;
-
- CREATE TABLE dns_domains (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain),
- FOREIGN KEY (project_id) REFERENCES projects (id)
- );
-
- INSERT INTO dns_domains
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains_backup;
-
- DROP TABLE dns_domains_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql
deleted file mode 100644
index eeb481658..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql
+++ /dev/null
@@ -1,52 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE dns_domains_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains;
-
- DROP TABLE dns_domains;
-
- CREATE TABLE dns_domains (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains_backup;
-
- DROP TABLE dns_domains_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py b/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py
deleted file mode 100644
index 734a3729f..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import (Boolean, Column, DateTime, ForeignKey,
- Index, MetaData, String, Table)
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- tables = (
- "user_project_role_association",
- "user_project_association",
- "user_role_association",
- "projects",
- "users",
- "auth_tokens",
- )
- for table_name in tables:
- Table(table_name, meta, autoload=True).drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- auth_tokens = Table('auth_tokens', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('token_hash', String(length=255), primary_key=True,
- nullable=False),
- Column('user_id', String(length=255)),
- Column('server_management_url', String(length=255)),
- Column('storage_url', String(length=255)),
- Column('cdn_management_url', String(length=255)),
- mysql_engine='InnoDB',
- )
-
- projects = Table('projects', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('description', String(length=255)),
- Column('project_manager', String(length=255), ForeignKey('users.id')),
- mysql_engine='InnoDB',
- )
-
- user_project_association = Table('user_project_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- mysql_engine='InnoDB',
- )
-
- user_project_role_association = \
- Table('user_project_role_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
- mysql_engine='InnoDB',
- )
-
- user_role_association = Table('user_role_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), ForeignKey('users.id'),
- primary_key=True, nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
- mysql_engine='InnoDB',
- )
-
- users = Table('users', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('access_key', String(length=255)),
- Column('secret_key', String(length=255)),
- Column('is_admin', Boolean),
- mysql_engine='InnoDB',
- )
-
- tables = [users, projects, user_project_association,
- auth_tokens, user_project_role_association,
- user_role_association]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.exception('Exception while creating table.')
- raise
-
- if migrate_engine.name == 'mysql':
- index = Index('project_id', user_project_association.c.project_id)
- index.create(migrate_engine)
-
- fkeys = [
- [
- [user_project_role_association.c.user_id,
- user_project_role_association.c.project_id],
- [user_project_association.c.user_id,
- user_project_association.c.project_id],
- 'user_project_role_association_ibfk_1',
- ],
- [
- [user_project_association.c.user_id],
- [users.c.id],
- 'user_project_association_ibfk_1',
- ],
- [
- [user_project_association.c.project_id],
- [projects.c.id],
- 'user_project_association_ibfk_2',
- ],
- ]
-
- for fkey_pair in fkeys:
- if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
- fkey = ForeignKeyConstraint(columns=fkey_pair[0],
- refcolumns=fkey_pair[1],
- name=fkey_pair[2])
- fkey.create()
- elif migrate_engine.name == 'postgresql':
- fkey = ForeignKeyConstraint(columns=fkey_pair[0],
- refcolumns=fkey_pair[1])
- fkey.create()
-
- # Hopefully this entire loop to set the charset can go away during
- # the "E" release compaction. See the notes on the dns_domains
- # table above for why this is required vs. setting mysql_charset inline.
- if migrate_engine.name == "mysql":
- tables = [
- # tables that are FK parents, must be converted early
- "projects",
- "user_project_association",
- "users",
- # those that are children and others later
- "auth_tokens",
- "user_project_role_association",
- "user_role_association",
- ]
- sql = "SET foreign_key_checks = 0;"
- for table in tables:
- sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
- sql += "SET foreign_key_checks = 1;"
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
- % migrate_engine.url.database
- migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py b/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py
deleted file mode 100644
index 27ad13e91..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-from sqlalchemy import MetaData, Table
-from sqlalchemy import and_, between
-
-
-TABLES = ('instance_metadata',
- 'instance_system_metadata',
- 'block_device_mapping')
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- instance_list = list(instances.select().
- where(instances.c.deleted == True).execute())
- for table_name in TABLES:
- table = Table(table_name, meta, autoload=True)
-
- for instance in instance_list:
- if not instance['deleted_at']:
- continue
- table.update(
- (and_(table.c.deleted == True,
- table.c.instance_uuid == instance['uuid'],
- between(table.c.deleted_at,
- instance['deleted_at'] - datetime.timedelta(seconds=2),
- instance['deleted_at'] + datetime.timedelta(seconds=2)))
- ),
- {table.c.deleted: False,
- table.c.deleted_at: None}
- ).execute()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- instance_list = list(instances.select().
- where(instances.c.deleted == True).execute())
- for table_name in TABLES:
- table = Table(table_name, meta, autoload=True)
- for instance in instance_list:
- table.update(
- (and_(table.c.deleted == False,
- table.c.instance_uuid == instance['uuid'])
- ),
- {table.c.deleted: True,
- table.c.deleted_at: instance['deleted_at']}
- ).execute()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py
deleted file mode 100644
index d51bbb912..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(fixed_ips)
-
- try:
- fixed_ips.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == fixed_ips.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(fixed_ips.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- fixed_ips.c.instance_id.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(fixed_ips)
-
- fkeys = list(fixed_ips.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- try:
- fixed_ips.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == fixed_ips.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- fixed_ips.c.instance_uuid.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql
deleted file mode 100644
index 0a7a7bed9..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql
+++ /dev/null
@@ -1,85 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- NULL,
- instance_uuid,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips;
-
- UPDATE fixed_ips_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE fixed_ips_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX fixed_ips_id ON fixed_ips(id);
- CREATE INDEX address ON fixed_ips(address);
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql
deleted file mode 100644
index 417b5bfe3..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql
+++ /dev/null
@@ -1,85 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- NULL,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips;
-
- UPDATE fixed_ips_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE fixed_ips_backup.instance_id = instances.id
- );
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX fixed_ips_id ON fixed_ips(id);
- CREATE INDEX address ON fixed_ips(address);
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_uuid,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql
deleted file mode 100644
index bb210025a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql
+++ /dev/null
@@ -1,71 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- NULL,
- instance_uuid,
- uuid
- FROM virtual_interfaces;
-
- UPDATE virtual_interfaces_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE virtual_interfaces_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id VARCHAR(36) NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX virtual_interfaces_instance_id ON
- virtual_interfaces(instance_id);
- CREATE INDEX virtual_interfaces_network_id ON
- virtual_interfaces(network_id);
-
- INSERT INTO virtual_interfaces
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql
deleted file mode 100644
index 5ee98d5c1..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql
+++ /dev/null
@@ -1,71 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- NULL,
- uuid
- FROM virtual_interfaces;
-
- UPDATE virtual_interfaces_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE virtual_interfaces_backup.instance_id = instances.id
- );
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_uuid VARCHAR(36) NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX virtual_interfaces_instance_uuid ON
- virtual_interfaces(instance_uuid);
- CREATE INDEX virtual_interfaces_network_id ON
- virtual_interfaces(network_id);
-
- INSERT INTO virtual_interfaces
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_uuid,
- uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py
deleted file mode 100644
index 8f7ad1a15..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(virtual_interfaces)
-
- try:
- virtual_interfaces.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == virtual_interfaces.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(virtual_interfaces.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- virtual_interfaces.c.instance_id.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(virtual_interfaces)
-
- fkeys = list(virtual_interfaces.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- try:
- virtual_interfaces.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == virtual_interfaces.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- virtual_interfaces.c.instance_uuid.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_id],
- refcolumns=[instances.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py
deleted file mode 100644
index 447307952..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Add 'user_id' column to quota_usages table.
- quota_usages = Table('quota_usages', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- quota_usages.create_column(user_id)
-
- # Add 'user_id' column to reservations table.
- reservations = Table('reservations', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- reservations.create_column(user_id)
-
- # New table.
- user_quotas = Table('user_quotas', meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('resource',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=False),
- Column('hard_limit', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- user_quotas.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(user_quotas))
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- quota_usages.drop_column('user_id')
-
- reservations = Table('reservations', meta, autoload=True)
- reservations.drop_column('user_id')
-
- user_quotas = Table('user_quotas', meta, autoload=True)
- try:
- user_quotas.drop()
- except Exception:
- LOG.error(_("user_quotas table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
deleted file mode 100644
index ccf9d66b8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Reverse the previous migration
- meta = MetaData()
- meta.bind = migrate_engine
-
- reservations = Table('reservations', meta, autoload=True)
- d = reservations.delete(reservations.c.deleted == True)
- d.execute()
- reservations.drop_column('user_id')
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- d = quota_usages.delete(quota_usages.c.user_id != None)
- d.execute()
- quota_usages.drop_column('user_id')
-
- user_quotas = Table('user_quotas', meta, autoload=True)
- try:
- user_quotas.drop()
- except Exception:
- LOG.error(_("user_quotas table not dropped"))
- raise
-
-
-def downgrade(migrate_engine):
- # Undo the reversal of the previous migration
- # (data is not preserved)
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Add 'user_id' column to quota_usages table.
- quota_usages = Table('quota_usages', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- quota_usages.create_column(user_id)
-
- # Add 'user_id' column to reservations table.
- reservations = Table('reservations', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- reservations.create_column(user_id)
-
- # New table.
- user_quotas = Table('user_quotas', meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('resource',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=False),
- Column('hard_limit', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- user_quotas.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(user_quotas))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py b/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py
deleted file mode 100644
index 5b0e19660..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (c) 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import Index, MetaData, String, Table
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- # create new table
- compute_node_stats = Table('compute_node_stats', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False,
- autoincrement=True),
- Column('compute_node_id', Integer, index=True, nullable=False),
- Column('key', String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), nullable=False),
- Column('value', String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- mysql_engine='InnoDB')
- try:
- compute_node_stats.create()
- except Exception:
- LOG.exception("Exception while creating table 'compute_node_stats'")
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- compute_node_stats = Table('compute_node_stats', meta, autoload=True)
- compute_node_stats.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py
deleted file mode 100644
index 432fd91a0..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('block_device_mapping', meta, autoload=True)
-
- # Based on block_device_mapping_update_or_create
- # from: nova/db/sqlalchemy/api.py
- i = Index('block_device_mapping_instance_uuid_device_name_idx',
- t.c.instance_uuid, t.c.device_name)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on block_device_mapping_update_or_create
- # from: nova/db/sqlalchemy/api.py
- i = Index(
- 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
- t.c.instance_uuid, t.c.virtual_name, t.c.device_name)
- i.create(migrate_engine)
-
- # Based on block_device_mapping_destroy_by_instance_and_volume
- # from: nova/db/sqlalchemy/api.py
- i = Index('block_device_mapping_instance_uuid_volume_id_idx',
- t.c.instance_uuid, t.c.volume_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('block_device_mapping', meta, autoload=True)
-
- i = Index('block_device_mapping_instance_uuid_device_name_idx',
- t.c.instance_uuid, t.c.device_name)
- i.drop(migrate_engine)
-
- i = Index(
- 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
- t.c.instance_uuid, t.c.virtual_name, t.c.device_name)
- i.drop(migrate_engine)
-
- i = Index('block_device_mapping_instance_uuid_volume_id_idx',
- t.c.instance_uuid, t.c.volume_id)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py
deleted file mode 100644
index 1345e5396..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError, OperationalError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on bw_usage_get_by_uuids
- # from: nova/db/sqlalchemy/api.py
- t = Table('bw_usage_cache', meta, autoload=True)
- i = Index('bw_usage_cache_uuid_start_period_idx',
- t.c.uuid, t.c.start_period)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('bw_usage_cache', meta, autoload=True)
- i = Index('bw_usage_cache_uuid_start_period_idx',
- t.c.uuid, t.c.start_period)
- if migrate_engine.url.get_dialect().name.startswith('sqlite'):
- try:
- i.drop(migrate_engine)
- except OperationalError:
- # Sqlite is very broken for any kind of table modification.
- # adding columns creates a new table, then copies the data,
- # and looses the indexes.
- # Thus later migrations that add columns will cause the
- # earlier migration's downgrade unittests to fail on
- # dropping indexes.
- # Honestly testing migrations on sqlite is not really a very
- # valid test (because of above facts), but that is for
- # another day. (mdragon)
- pass
- else:
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py b/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py
deleted file mode 100644
index 1201ce6be..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('certificates', meta, autoload=True)
-
- # Based on certificate_get_all_by_project
- # from: nova/db/sqlalchemy/api.py
- i = Index('certificates_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on certificate_get_all_by_user
- # from: nova/db/sqlalchemy/api.py
- i = Index('certificates_user_id_deleted_idx',
- t.c.user_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('certificates', meta, autoload=True)
-
- i = Index('certificates_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('certificates_user_id_deleted_idx',
- t.c.user_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py
deleted file mode 100644
index 0ae4a4d51..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('fixed_ips', meta, autoload=True)
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on fixed_ip_get_by_network_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_network_id_host_deleted_idx',
- t.c.network_id, t.c.host, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on fixed_ip_associate
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_address_reserved_network_id_deleted_idx',
- t.c.address, t.c.reserved, t.c.network_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('fixed_ips', meta, autoload=True)
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- # Based on fixed_ip_get_by_network_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_network_id_host_deleted_idx',
- t.c.network_id, t.c.host, t.c.deleted)
- i.drop(migrate_engine)
-
- # Based on fixed_ip_associate
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_address_reserved_network_id_deleted_idx',
- t.c.address, t.c.reserved, t.c.network_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py
deleted file mode 100644
index b953b28b9..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('floating_ips', meta, autoload=True)
-
- # Based on floating_ip_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on floating_ip_get_all_by_project
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_project_id_idx', t.c.project_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on floating_ip_allocate_address
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- t.c.pool, t.c.deleted, t.c.fixed_ip_id, t.c.project_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('floating_ips', meta, autoload=True)
-
- i = Index('floating_ips_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('floating_ips_project_id_idx', t.c.project_id)
- i.drop(migrate_engine)
-
- i = Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- t.c.pool, t.c.deleted, t.c.fixed_ip_id, t.c.project_id)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py b/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py
deleted file mode 100644
index 3ed8277a6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on instance_fault_get_by_instance_uuids
- # from: nova/db/sqlalchemy/api.py
- t = Table('instance_faults', meta, autoload=True)
- i = Index('instance_faults_instance_uuid_deleted_created_at_idx',
- t.c.instance_uuid, t.c.deleted, t.c.created_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instance_faults', meta, autoload=True)
- i = Index('instance_faults_instance_uuid_deleted_created_at_idx',
- t.c.instance_uuid, t.c.deleted, t.c.created_at)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py
deleted file mode 100644
index 80ef0f983..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on instance_type_extra_specs_get_item
- # from: nova/db/sqlalchemy/api.py
- t = Table('instance_type_extra_specs', meta, autoload=True)
- i = Index('instance_type_extra_specs_instance_type_id_key_idx',
- t.c.instance_type_id, t.c.key)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instance_type_extra_specs', meta, autoload=True)
- i = Index('instance_type_extra_specs_instance_type_id_key_idx',
- t.c.instance_type_id, t.c.key)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py
deleted file mode 100644
index a429a7685..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instances', meta, autoload=True)
-
- # Based on service_get_all_compute_sorted
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_host_deleted_idx',
- t.c.host, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_all_by_reservation
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_reservation_id_idx', t.c.reservation_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_active_by_window
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_terminated_at_launched_at_idx',
- t.c.terminated_at, t.c.launched_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on security_group_in_use
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_uuid_deleted_idx',
- t.c.uuid, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_all_hung_in_rebooting
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_task_state_updated_at_idx',
- t.c.task_state, t.c.updated_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instances', meta, autoload=True)
-
- i = Index('instances_host_deleted_idx',
- t.c.host, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('instances_reservation_id_idx', t.c.reservation_id)
- i.drop(migrate_engine)
-
- i = Index('instances_terminated_at_launched_at_idx',
- t.c.terminated_at, t.c.launched_at)
- i.drop(migrate_engine)
-
- i = Index('instances_uuid_deleted_idx',
- t.c.uuid, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('instances_task_state_updated_at_idx',
- t.c.task_state, t.c.updated_at)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py b/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py
deleted file mode 100644
index e904742ae..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('iscsi_targets', meta, autoload=True)
-
- # Based on iscsi_target_count_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('iscsi_targets_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on volume_allocate_iscsi_target
- # from: nova/db/sqlalchemy/api.py
- i = Index('iscsi_targets_host_volume_id_deleted_idx',
- t.c.host, t.c.volume_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('iscsi_targets', meta, autoload=True)
-
- i = Index('iscsi_targets_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('iscsi_targets_host_volume_id_deleted_idx',
- t.c.host, t.c.volume_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py
deleted file mode 100644
index 11a9dde86..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('networks', meta, autoload=True)
-
- # Based on network_get_by_bridge
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_bridge_deleted_idx',
- t.c.bridge, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_query
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_all_by_uuids
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_uuid_project_id_deleted_idx',
- t.c.uuid, t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_create_safe
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_vlan_deleted_idx',
- t.c.vlan, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_by_cidr
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_cidr_v6_idx', t.c.cidr_v6)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('networks', meta, autoload=True)
-
- i = Index('networks_bridge_deleted_idx',
- t.c.bridge, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('networks_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_uuid_project_id_deleted_idx',
- t.c.uuid, t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_vlan_deleted_idx',
- t.c.vlan, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_cidr_v6_idx', t.c.cidr_v6)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py b/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py
deleted file mode 100644
index 312ebbfc1..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, String, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- is_public = Column('is_public', Boolean)
-
- instance_types.create_column(is_public)
- instance_types.update().values(is_public=True).execute()
-
- # New table.
- instance_type_projects = Table('instance_type_projects', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_type_id',
- Integer,
- ForeignKey('instance_types.id'),
- nullable=False),
- Column('project_id', String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- try:
- instance_type_projects.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(instance_type_projects))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- is_public = Column('is_public', Boolean)
-
- instance_types.drop_column(is_public)
-
- instance_type_projects = Table(
- 'instance_type_projects', meta, autoload=True)
- instance_type_projects.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py b/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py
deleted file mode 100644
index b6cf56d47..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import String, Column, MetaData, Table
-from migrate.changeset import UniqueConstraint
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dialect = migrate_engine.url.get_dialect().name
-
- aggregates = Table('aggregates', meta, autoload=True)
- if dialect.startswith('sqlite'):
- aggregates.c.name.alter(unique=False)
- elif dialect.startswith('postgres'):
- ucon = UniqueConstraint('name',
- name='aggregates_name_key',
- table=aggregates)
- ucon.drop()
-
- else:
- col2 = aggregates.c.name
- UniqueConstraint(col2, name='name').drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- aggregates = Table('aggregates', meta, autoload=True)
- aggregates.c.name.alter(unique=True)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
index 971fa3626..44eac3695 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
@@ -14,21 +14,28 @@
# License for the specific language governing permissions and limitations
# under the License.
+from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Float, ForeignKey
from sqlalchemy import Index, Integer, MetaData, String, Table, Text
+from sqlalchemy import dialects
-from nova import flags
from nova.openstack.common import log as logging
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
+# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
+# Schema. In Folsom we created tables as latin1 and converted them to utf8
+# later. This conversion causes some of the Text columns on MySQL to get
+# created as mediumtext instead of just text.
+def MediumText():
+ return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
+
+
def _populate_instance_types(instance_types_table):
default_inst_types = {
'm1.tiny': dict(mem=512, vcpus=1, root_gb=0, eph_gb=0, flavid=1),
@@ -47,10 +54,12 @@ def _populate_instance_types(instance_types_table):
'ephemeral_gb': values["eph_gb"],
'rxtx_factor': 1,
'swap': 0,
- 'flavorid': values["flavid"]})
+ 'flavorid': values["flavid"],
+ 'disabled': False,
+ 'is_public': True})
except Exception:
LOG.info(repr(instance_types_table))
- LOG.exception('Exception while seeding instance_types table')
+ LOG.exception(_('Exception while seeding instance_types table'))
raise
@@ -71,7 +80,7 @@ def upgrade(migrate_engine):
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
@@ -80,11 +89,11 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('host', String(length=255), unique=True),
+ Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
@@ -98,7 +107,7 @@ def upgrade(migrate_engine):
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
@@ -107,26 +116,10 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('name', String(length=255), unique=True),
- Column('operational_state', String(length=255), nullable=False),
+ Column('name', String(length=255)),
Column('availability_zone', String(length=255), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- auth_tokens = Table('auth_tokens', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('token_hash', String(length=255), primary_key=True,
- nullable=False),
- Column('user_id', String(length=255)),
- Column('server_management_url', String(length=255)),
- Column('storage_url', String(length=255)),
- Column('cdn_management_url', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
@@ -135,20 +128,17 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_id', Integer, ForeignKey('instances.id'),
- nullable=False),
Column('device_name', String(length=255), nullable=False),
Column('delete_on_termination', Boolean),
Column('virtual_name', String(length=255)),
- Column('snapshot_id', Integer, ForeignKey('snapshots.id'),
- nullable=True),
- Column('volume_id', Integer(), ForeignKey('volumes.id'),
- nullable=True),
+ Column('snapshot_id', String(length=36), nullable=True),
+ Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
- Column('connection_info', Text),
+ Column('connection_info', MediumText()),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
@@ -162,8 +152,9 @@ def upgrade(migrate_engine):
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
+ Column('uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
cells = Table('cells', meta,
@@ -196,7 +187,20 @@ def upgrade(migrate_engine):
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ compute_node_stats = Table('compute_node_stats', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('compute_node_id', Integer, nullable=False),
+ Column('key', String(length=255), nullable=False),
+ Column('value', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
@@ -212,9 +216,9 @@ def upgrade(migrate_engine):
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
- Column('hypervisor_type', Text, nullable=False),
+ Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
- Column('cpu_info', Text, nullable=False),
+ Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
@@ -222,7 +226,7 @@ def upgrade(migrate_engine):
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
@@ -239,7 +243,7 @@ def upgrade(migrate_engine):
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
consoles = Table('consoles', meta,
@@ -249,31 +253,25 @@ def upgrade(migrate_engine):
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
- Column('instance_id', Integer),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- # NOTE(dprince): Trying to create a fresh utf8 dns_domains tables
- # with a domain primary key length of 512 fails w/
- # 'Specified key was too long; max key length is 767 bytes'.
- # See: https://bugs.launchpad.net/nova/+bug/993663
- # If we fix this during Folsom we can set mysql_charset=utf8 inline...
- # and remove the unsightly loop that does it below during "E" compaction.
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('domain', String(length=512), primary_key=True, nullable=False),
+ Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255), ForeignKey('projects.id')),
+ Column('project_id', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
@@ -284,14 +282,14 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
- Column('instance_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
@@ -308,34 +306,32 @@ def upgrade(migrate_engine):
Column('pool', String(length=255)),
Column('interface', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- instance_actions = Table('instance_actions', meta,
+ instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('action', String(length=255)),
- Column('error', Text),
Column('instance_uuid', String(length=36)),
+ Column('code', Integer, nullable=False),
+ Column('message', String(length=255)),
+ Column('details', MediumText()),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- instance_faults = Table('instance_faults', meta,
+ instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_uuid', String(length=36)),
- Column('code', Integer, nullable=False),
- Column('message', String(length=255)),
- Column('details', Text),
+ Column('uuid', String(36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
@@ -344,10 +340,10 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('network_info', Text),
- Column('instance_id', String(36), nullable=False, unique=True),
+ Column('network_info', MediumText()),
+ Column('instance_uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_metadata = Table('instance_metadata', meta,
@@ -356,12 +352,24 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_id', Integer, ForeignKey('instances.id'),
- nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
+ Column('instance_uuid', String(length=36), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ instance_system_metadata = Table('instance_system_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('instance_uuid', String(length=36), nullable=False),
+ Column('key', String(length=255), nullable=False),
+ Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
@@ -375,7 +383,19 @@ def upgrade(migrate_engine):
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ instance_type_projects = Table('instance_type_projects', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('instance_type_id', Integer, nullable=False),
+ Column('project_id', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
@@ -393,8 +413,10 @@ def upgrade(migrate_engine):
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
+ Column('disabled', Boolean),
+ Column('is_public', Boolean),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instances = Table('instances', meta,
@@ -412,14 +434,14 @@ def upgrade(migrate_engine):
Column('server_name', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
- Column('key_data', Text),
+ Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
- Column('user_data', Text),
+ Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
@@ -429,7 +451,7 @@ def upgrade(migrate_engine):
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
- Column('launched_on', Text),
+ Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
@@ -449,7 +471,7 @@ def upgrade(migrate_engine):
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
iscsi_targets = Table('iscsi_targets', meta,
@@ -460,10 +482,9 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
- Column('volume_id', Integer, ForeignKey('volumes.id'),
- nullable=True),
+ Column('volume_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
@@ -475,9 +496,9 @@ def upgrade(migrate_engine):
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
- Column('public_key', Text),
+ Column('public_key', MediumText()),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
migrations = Table('migrations', meta,
@@ -494,7 +515,7 @@ def upgrade(migrate_engine):
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
networks = Table('networks', meta,
@@ -528,34 +549,49 @@ def upgrade(migrate_engine):
Column('priority', Integer),
Column('rxtx_base', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- projects = Table('projects', meta,
+ provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('description', String(length=255)),
- Column('project_manager', String(length=255), ForeignKey('users.id')),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('protocol', String(length=5)),
+ Column('from_port', Integer),
+ Column('to_port', Integer),
+ Column('cidr', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- provider_fw_rules = Table('provider_fw_rules', meta,
+ quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('protocol', String(length=5)),
- Column('from_port', Integer),
- Column('to_port', Integer),
- Column('cidr', String(length=255)),
+ Column('class_name', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('hard_limit', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ quota_usages = Table('quota_usages', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('project_id', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('in_use', Integer, nullable=False),
+ Column('reserved', Integer, nullable=False),
+ Column('until_refresh', Integer),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
quotas = Table('quotas', meta,
@@ -568,7 +604,23 @@ def upgrade(migrate_engine):
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ reservations = Table('reservations', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('uuid', String(length=36), nullable=False),
+ Column('usage_id', Integer, nullable=False),
+ Column('project_id', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('delta', Integer, nullable=False),
+ Column('expire', DateTime),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
@@ -579,7 +631,7 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_group_instance_association = \
@@ -590,9 +642,9 @@ def upgrade(migrate_engine):
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer, ForeignKey('security_groups.id')),
- Column('instance_id', Integer, ForeignKey('instances.id')),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
@@ -608,7 +660,7 @@ def upgrade(migrate_engine):
Column('cidr', String(length=255)),
Column('group_id', Integer, ForeignKey('security_groups.id')),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
@@ -622,7 +674,7 @@ def upgrade(migrate_engine):
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
services = Table('services', meta,
@@ -638,19 +690,7 @@ def upgrade(migrate_engine):
Column('disabled', Boolean),
Column('availability_zone', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- sm_flavors = Table('sm_flavors', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', Integer, primary_key=True, nullable=False),
- Column('label', String(length=255)),
- Column('description', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
sm_backend_config = Table('sm_backend_config', meta,
@@ -665,94 +705,80 @@ def upgrade(migrate_engine):
Column('sr_type', String(length=255)),
Column('config_params', String(length=2047)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- sm_volume = Table('sm_volume', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', Integer(), ForeignKey('volumes.id'), primary_key=True,
- nullable=False, autoincrement=False),
- Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
- nullable=False),
- Column('vdi_uuid', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- snapshots = Table('snapshots', meta,
+ sm_flavors = Table('sm_flavors', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('volume_id', Integer, nullable=False),
- Column('user_id', String(length=255)),
- Column('project_id', String(length=255)),
- Column('status', String(length=255)),
- Column('progress', String(length=255)),
- Column('volume_size', Integer),
- Column('scheduled_at', DateTime),
- Column('display_name', String(length=255)),
- Column('display_description', String(length=255)),
+ Column('label', String(length=255)),
+ Column('description', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_project_association = Table('user_project_association', meta,
+ sm_volume = Table('sm_volume', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
+ Column('id', String(length=36), primary_key=True,
+ nullable=False, autoincrement=False),
+ Column('backend_id', Integer, nullable=False),
+ Column('vdi_uuid', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_project_role_association = \
- Table('user_project_role_association', meta,
+ snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_role_association = Table('user_role_association', meta,
+ snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), ForeignKey('users.id'),
- primary_key=True, nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
+ Column('id', String(length=36), primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), nullable=False),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('status', String(length=255)),
+ Column('progress', String(length=255)),
+ Column('volume_size', Integer),
+ Column('scheduled_at', DateTime),
+ Column('display_name', String(length=255)),
+ Column('display_description', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- users = Table('users', meta,
+ task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('access_key', String(length=255)),
- Column('secret_key', String(length=255)),
- Column('is_admin', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('task_name', String(length=255), nullable=False),
+ Column('state', String(length=255), nullable=False),
+ Column('host', String(length=255), nullable=False),
+ Column('period_beginning', String(length=255), nullable=False),
+ Column('period_ending', String(length=255), nullable=False),
+ Column('message', String(length=255), nullable=False),
+ Column('task_items', Integer),
+ Column('errors', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
@@ -763,10 +789,10 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255), unique=True),
Column('network_id', Integer),
- Column('instance_id', Integer, nullable=False),
Column('uuid', String(length=36)),
+ Column('instance_uuid', String(length=36), nullable=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
virtual_storage_arrays = Table('virtual_storage_arrays', meta,
@@ -785,18 +811,18 @@ def upgrade(migrate_engine):
Column('vol_count', Integer, nullable=False),
Column('status', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- volume_types = Table('volume_types', meta,
+ volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('name', String(length=255)),
+ Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
volume_metadata = Table('volume_metadata', meta,
@@ -805,12 +831,11 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('volume_id', Integer, ForeignKey('volumes.id'),
- nullable=False),
+ Column('volume_id', String(length=36), nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
volume_type_extra_specs = Table('volume_type_extra_specs', meta,
@@ -824,24 +849,33 @@ def upgrade(migrate_engine):
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- volumes = Table('volumes', meta,
+ volume_types = Table('volume_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
+ Column('name', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ volumes = Table('volumes', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
- Column('instance_id', Integer, ForeignKey('instances.id')),
Column('mountpoint', String(length=255)),
- Column('attach_time', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
@@ -851,10 +885,12 @@ def upgrade(migrate_engine):
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
- Column('snapshot_id', Integer),
+ Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
+ Column('instance_uuid', String(length=36)),
+ Column('attach_time', DateTime),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instances.create()
@@ -863,21 +899,24 @@ def upgrade(migrate_engine):
# create all tables
tables = [aggregates, console_pools, instance_types,
- users, projects, security_groups, sm_flavors, sm_backend_config,
- snapshots, user_project_association, volume_types,
+ security_groups, sm_flavors, sm_backend_config,
+ snapshots, volume_types,
volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
- auth_tokens, block_device_mapping, bw_usage_cache, cells,
- certificates, compute_nodes, consoles, dns_domains, fixed_ips,
- floating_ips, instance_actions, instance_faults,
- instance_info_caches, instance_metadata,
- instance_type_extra_specs, iscsi_targets, key_pairs,
- migrations, networks, provider_fw_rules,
- quotas, s3_images, security_group_instance_association,
+ block_device_mapping, bw_usage_cache, cells,
+ certificates, compute_node_stats, compute_nodes, consoles,
+ dns_domains, fixed_ips, floating_ips,
+ instance_faults, instance_id_mappings, instance_info_caches,
+ instance_metadata, instance_system_metadata,
+ instance_type_extra_specs, instance_type_projects,
+ iscsi_targets, key_pairs, migrations, networks,
+ provider_fw_rules, quota_classes, quota_usages, quotas,
+ reservations, s3_images, security_group_instance_association,
security_group_rules, services, sm_volume,
- user_project_role_association, user_role_association,
- virtual_interfaces, virtual_storage_arrays, volume_metadata,
+ snapshot_id_mappings, task_log,
+ virtual_interfaces,
+ virtual_storage_arrays, volume_id_mappings, volume_metadata,
volume_type_extra_specs]
for table in tables:
@@ -885,103 +924,302 @@ def upgrade(migrate_engine):
table.create()
except Exception:
LOG.info(repr(table))
- LOG.exception('Exception while creating table.')
+ LOG.exception(_('Exception while creating table.'))
raise
- # MySQL specific Indexes from Essex
- # NOTE(dprince): I think some of these can be removed in Folsom
indexes = [
+ # agent_builds
+ Index('agent_builds_hypervisor_os_arch_idx',
+ agent_builds.c.hypervisor,
+ agent_builds.c.os,
+ agent_builds.c.architecture),
+
+ # aggregate_metadata
+ Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
+
+ # block_device_mapping
+ Index('block_device_mapping_instance_uuid_idx',
+ block_device_mapping.c.instance_uuid),
+
+ Index('block_device_mapping_instance_uuid_device_name_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.device_name),
+ Index(
+ 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.virtual_name,
+ block_device_mapping.c.device_name),
+
+ Index('block_device_mapping_instance_uuid_volume_id_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.volume_id),
+
+ # bw_usage_cache
+ Index('bw_usage_cache_uuid_start_period_idx',
+ bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
+
+ # certificates
+ Index('certificates_project_id_deleted_idx',
+ certificates.c.project_id, certificates.c.deleted),
+
+ Index('certificates_user_id_deleted_idx',
+ certificates.c.user_id, certificates.c.deleted),
+
+ # compute_node_stats
+ Index('ix_compute_node_stats_compute_node_id',
+ compute_node_stats.c.compute_node_id),
+
+ # consoles
+ Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
+
+ # dns_domains
+ Index('dns_domains_domain_deleted_idx',
+ dns_domains.c.domain, dns_domains.c.deleted),
+
+ # fixed_ips
+ Index('fixed_ips_host_idx', fixed_ips.c.host),
+ Index('fixed_ips_network_id_host_deleted_idx',
+ fixed_ips.c.network_id, fixed_ips.c.host, fixed_ips.c.deleted),
+ Index('fixed_ips_address_reserved_network_id_deleted_idx',
+ fixed_ips.c.address, fixed_ips.c.reserved,
+ fixed_ips.c.network_id, fixed_ips.c.deleted),
+
+ # floating_ips
+ Index('floating_ips_host_idx', floating_ips.c.host),
+
+ Index('floating_ips_project_id_idx', floating_ips.c.project_id),
+
+ Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
+ floating_ips.c.pool, floating_ips.c.deleted,
+ floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
+
+ # instance_faults
+ Index('instance_faults_instance_uuid_deleted_created_at_idx',
+ instance_faults.c.instance_uuid, instance_faults.c.deleted,
+ instance_faults.c.created_at),
+
+ # instance_type_extra_specs
+ Index('instance_type_extra_specs_instance_type_id_key_idx',
+ instance_type_extra_specs.c.instance_type_id,
+ instance_type_extra_specs.c.key),
+
+ # instance_id_mappings
+ Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
+
+ # instance_metadata
+ Index('instance_metadata_instance_uuid_idx',
+ instance_metadata.c.instance_uuid),
+
+ # instances
+ Index('instances_host_deleted_idx',
+ instances.c.host, instances.c.deleted),
+
+ Index('instances_reservation_id_idx', instances.c.reservation_id),
+
+ Index('instances_terminated_at_launched_at_idx',
+ instances.c.terminated_at, instances.c.launched_at),
+
+ Index('instances_uuid_deleted_idx',
+ instances.c.uuid, instances.c.deleted),
+
+ Index('instances_task_state_updated_at_idx',
+ instances.c.task_state, instances.c.updated_at),
+
+
+ # iscsi_targets
+ Index('iscsi_targets_host_idx', iscsi_targets.c.host),
+
+ Index('iscsi_targets_host_volume_id_deleted_idx',
+ iscsi_targets.c.host, iscsi_targets.c.volume_id,
+ iscsi_targets.c.deleted),
+
+ # key_pairs
+ Index('key_pair_user_id_name_idx',
+ key_pairs.c.user_id, key_pairs.c.name),
+
+ # networks
+ Index('networks_bridge_deleted_idx',
+ networks.c.bridge, networks.c.deleted),
+
+ Index('networks_host_idx', networks.c.host),
+
+ Index('networks_project_id_deleted_idx',
+ networks.c.project_id, networks.c.deleted),
+
+ Index('networks_uuid_project_id_deleted_idx',
+ networks.c.uuid, networks.c.project_id, networks.c.deleted),
+
+ Index('networks_vlan_deleted_idx',
+ networks.c.vlan, networks.c.deleted),
+
+ Index('networks_cidr_v6_idx', networks.c.cidr_v6),
+
+ # reservations
+ Index('ix_reservations_project_id', reservations.c.project_id),
+
+ # security_group_instance_association
+ Index('security_group_instance_association_instance_uuid_idx',
+ security_group_instance_association.c.instance_uuid),
+
+ # quota_classes
+ Index('ix_quota_classes_class_name', quota_classes.c.class_name),
+
+ # quota_usages
+ Index('ix_quota_usages_project_id', quota_usages.c.project_id),
+
+
+ # volumes
+ Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
+
+ # task_log
+ Index('ix_task_log_period_beginning', task_log.c.period_beginning),
+ Index('ix_task_log_host', task_log.c.host),
+ Index('ix_task_log_period_ending', task_log.c.period_ending),
+
+ ]
+
+ mysql_indexes = [
+ # TODO(dprince): review these for removal. Some of these indexes
+ # were automatically created by SQLAlchemy migrate and *may* no longer
+ # be in use
+ Index('instance_type_id', instance_type_projects.c.instance_type_id),
+ Index('project_id', dns_domains.c.project_id),
+ Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
+ Index('backend_id', sm_volume.c.backend_id),
+ Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
- Index('instance_id', fixed_ips.c.instance_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
- Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
- Index('project_id', user_project_association.c.project_id),
- Index('network_id', virtual_interfaces.c.network_id),
- Index('instance_id', virtual_interfaces.c.instance_id),
+ Index('address', fixed_ips.c.address),
+ Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
+ Index('instance_uuid', instance_system_metadata.c.instance_uuid),
+ Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
+ Index('snapshot_id', block_device_mapping.c.snapshot_id),
+ Index('usage_id', reservations.c.usage_id),
+ Index('virtual_interfaces_instance_uuid_fkey',
+ virtual_interfaces.c.instance_uuid),
+ Index('volume_id', block_device_mapping.c.volume_id),
+ Index('volume_metadata_volume_id_fkey', volume_metadata.c.volume_id),
]
+ # MySQL specific indexes
if migrate_engine.name == 'mysql':
+ for index in mysql_indexes:
+ index.create(migrate_engine)
+
+ # PostgreSQL specific indexes
+ if migrate_engine.name == 'postgresql':
+ Index('address', fixed_ips.c.address).create()
+
+ # Common indexes
+ if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql':
for index in indexes:
index.create(migrate_engine)
fkeys = [
- [[user_project_role_association.c.user_id,
- user_project_role_association.c.project_id],
- [user_project_association.c.user_id,
- user_project_association.c.project_id],
- 'user_project_role_association_ibfk_1'],
- [[user_project_association.c.user_id],
- [users.c.id], 'user_project_association_ibfk_1'],
- [[user_project_association.c.project_id], [projects.c.id],
- 'user_project_association_ibfk_2'],
- [[instance_info_caches.c.instance_id], [instances.c.uuid],
- 'instance_info_caches_ibfk_1'],
+
+ [[fixed_ips.c.instance_uuid],
+ [instances.c.uuid],
+ 'fixed_ips_instance_uuid_fkey'],
+ [[block_device_mapping.c.instance_uuid],
+ [instances.c.uuid],
+ 'block_device_mapping_instance_uuid_fkey'],
+ [[consoles.c.instance_uuid],
+ [instances.c.uuid],
+ 'consoles_instance_uuid_fkey'],
+ [[instance_info_caches.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_info_caches_instance_uuid_fkey'],
+ [[instance_metadata.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_metadata_instance_uuid_fkey'],
+ [[instance_system_metadata.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_system_metadata_ibfk_1'],
+ [[instance_type_projects.c.instance_type_id],
+ [instance_types.c.id],
+ 'instance_type_projects_ibfk_1'],
+ [[iscsi_targets.c.volume_id],
+ [volumes.c.id],
+ 'iscsi_targets_volume_id_fkey'],
+ [[reservations.c.usage_id],
+ [quota_usages.c.id],
+ 'reservations_ibfk_1'],
+ [[security_group_instance_association.c.instance_uuid],
+ [instances.c.uuid],
+ 'security_group_instance_association_instance_uuid_fkey'],
+ [[sm_volume.c.backend_id],
+ [sm_backend_config.c.id],
+ 'sm_volume_ibfk_2'],
+ [[sm_volume.c.id],
+ [volumes.c.id],
+ 'sm_volume_id_fkey'],
+ [[virtual_interfaces.c.instance_uuid],
+ [instances.c.uuid],
+ 'virtual_interfaces_instance_uuid_fkey'],
+ [[volume_metadata.c.volume_id],
+ [volumes.c.id],
+ 'volume_metadata_volume_id_fkey'],
+
]
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
+ # For MySQL we name our fkeys explicitly so they match Folsom
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
+ # PostgreSQL auto-generates fkey names that already match Folsom's
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
- # Hopefully this entire loop to set the charset can go away during
- # the "E" release compaction. See the notes on the dns_domains
- # table above for why this is required vs. setting mysql_charset inline.
if migrate_engine.name == "mysql":
- tables = [
- # tables that are FK parents, must be converted early
- "aggregates", "console_pools", "instance_types", "instances",
- "projects", "security_groups", "sm_backend_config", "sm_flavors",
- "snapshots", "user_project_association", "users", "volume_types",
- "volumes",
- # those that are children and others later
- "agent_builds", "aggregate_hosts", "aggregate_metadata",
- "auth_tokens", "block_device_mapping", "bw_usage_cache",
- "certificates", "compute_nodes", "consoles", "fixed_ips",
- "floating_ips", "instance_actions", "instance_faults",
- "instance_info_caches", "instance_metadata",
- "instance_type_extra_specs", "iscsi_targets", "key_pairs",
- "migrate_version", "migrations", "networks", "provider_fw_rules",
- "quotas", "s3_images", "security_group_instance_association",
- "security_group_rules", "services", "sm_volume",
- "user_project_role_association", "user_role_association",
- "virtual_interfaces", "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs"]
- sql = "SET foreign_key_checks = 0;"
- for table in tables:
- sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
- sql += "SET foreign_key_checks = 1;"
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
- % migrate_engine.url.database
+ # In Folsom we explicitly converted migrate_version to UTF8.
+ sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
+ # Set default DB charset to UTF8.
+ sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
+ migrate_engine.url.database
migrate_engine.execute(sql)
+ # TODO(dprince): due to the upgrade scripts in Folsom the unique key
+ # on instance_uuid is named 'instance_id'. Rename it in Grizzly?
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name='instance_id').create()
+
if migrate_engine.name == "postgresql":
- # NOTE(dprince): Need to rename the leftover zones stuff.
- # https://bugs.launchpad.net/nova/+bug/993667
- sql = "ALTER TABLE cells_id_seq RENAME TO zones_id_seq;"
- sql += "ALTER TABLE ONLY cells DROP CONSTRAINT cells_pkey;"
- sql += "ALTER TABLE ONLY cells ADD CONSTRAINT zones_pkey" \
- " PRIMARY KEY (id);"
-
- # NOTE(dprince): Need to rename the leftover quota_new stuff.
- # https://bugs.launchpad.net/nova/+bug/993669
- sql += "ALTER TABLE quotas_id_seq RENAME TO quotas_new_id_seq;"
- sql += "ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_pkey;"
- sql += "ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_new_pkey" \
- " PRIMARY KEY (id);"
+ # TODO(dprince): Drop this in Grizzly. Snapshots were converted
+ # to UUIDs in Folsom so we no longer require this autocreated
+ # sequence.
+ sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
+ SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
+ ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
+ nextval('snapshots_id_seq'::regclass);"""
+
+ # TODO(dprince): Drop this in Grizzly. Volumes were converted
+ # to UUIDs in Folsom so we no longer require this autocreated
+ # sequence.
+ sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
+ SELECT pg_catalog.setval('volumes_id_seq', 1, false);
+ ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
+ nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
+ # TODO(dprince): due to the upgrade scripts in Folsom the unique key
+ # on instance_uuid is named '.._instance_id_..'. Rename it in Grizzly?
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name='instance_info_caches_instance_id_key').create()
+
# populate initial instance types
_populate_instance_types(instance_types)
def downgrade(migrate_engine):
- raise Exception('Downgrade from Essex is unsupported.')
+ raise NotImplementedError(_('Downgrade from Folsom is unsupported.'))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
new file mode 100644
index 000000000..a208aecf6
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
@@ -0,0 +1,55 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import and_, String, Column, MetaData, select, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.create_column(node)
+
+ c_nodes = Table('compute_nodes', meta, autoload=True)
+ services = Table('services', meta, autoload=True)
+
+ # set instances.node = compute_nodes.hypervisor_hostname
+ q = select(
+ [instances.c.id, c_nodes.c.hypervisor_hostname],
+ whereclause=and_(
+ instances.c.deleted != True,
+ services.c.deleted != True,
+ services.c.binary == 'nova-compute',
+ c_nodes.c.deleted != True),
+ from_obj=instances.join(services,
+ instances.c.host == services.c.host)
+ .join(c_nodes,
+ services.c.id == c_nodes.c.service_id))
+ for (instance_id, hypervisor_hostname) in q.execute():
+ instances.update().where(instances.c.id == instance_id).\
+ values(node=hypervisor_hostname).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.drop_column(node)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
index 6bc0aed91..397f8a62b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
@@ -1,7 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -15,30 +12,30 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
+from sqlalchemy import MetaData, Table, Index
+
+INDEX_NAME = 'instances_host_node_deleted_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on _dnsdomain_get
+ instances = Table('instances', meta, autoload=True)
+
+ # Based on instance_get_all_host_and_node
# from: nova/db/sqlalchemy/api.py
- t = Table('dns_domains', meta, autoload=True)
- i = Index('dns_domains_domain_deleted_idx',
- t.c.domain, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('dns_domains', meta, autoload=True)
- i = Index('dns_domains_domain_deleted_idx',
- t.c.domain, t.c.deleted)
- i.drop(migrate_engine)
+ instances = Table('instances', meta, autoload=True)
+
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py b/nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py
index 23f7d3cdb..1499bd351 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py
@@ -23,11 +23,12 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on agent_build_get_by_triple
+ t = Table('migrations', meta, autoload=True)
+
+ # Based on migration_get_in_progress_by_host
# from: nova/db/sqlalchemy/api.py
- t = Table('agent_builds', meta, autoload=True)
- i = Index('agent_builds_hypervisor_os_arch_idx',
- t.c.hypervisor, t.c.os, t.c.architecture)
+ i = Index('migrations_by_host_and_status_idx', t.c.deleted,
+ t.c.source_compute, t.c.dest_compute, t.c.status)
try:
i.create(migrate_engine)
except IntegrityError:
@@ -38,7 +39,8 @@ def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('agent_builds', meta, autoload=True)
- i = Index('agent_builds_hypervisor_os_arch_idx',
- t.c.hypervisor, t.c.os, t.c.architecture)
+ t = Table('migrations', meta, autoload=True)
+
+ i = Index('migrations_by_host_and_status_idx', t.c.deleted,
+ t.c.source_compute, t.c.dest_compute, t.c.status)
i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py
index 549426608..2faae3a8e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,9 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Boolean, Column, MetaData, Table
-
from nova.openstack.common import log as logging
+from sqlalchemy import Column, String, MetaData, Table
LOG = logging.getLogger(__name__)
@@ -23,18 +24,16 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- disabled = Column('disabled', Boolean)
-
- instance_types.create_column(disabled)
- instance_types.update().values(disabled=False).execute()
+ instances = Table('instances', meta, autoload=True)
+ server_name = instances.columns.server_name
+ server_name.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- disabled = Column('disabled', Boolean)
-
- instance_types.drop_column(disabled)
+ instances = Table('instances', meta, autoload=True)
+ server_name = Column('server_name',
+ String(length=255))
+ instances.create_column(server_name)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql
new file mode 100644
index 000000000..89873ccd4
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql
@@ -0,0 +1,239 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instances_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instances_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances;
+
+ DROP TABLE instances;
+
+ CREATE TABLE instances (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ server_name VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ CREATE INDEX instances_host_node_deleted_idx
+ ON instances (host, node, deleted);
+
+ CREATE INDEX instances_host_deleted_idx
+ ON instances (host, deleted);
+
+ CREATE INDEX instances_reservation_id_idx
+ ON instances (reservation_id);
+
+ CREATE INDEX instances_terminated_at_launched_at_idx
+ ON instances (terminated_at, launched_at);
+
+ CREATE INDEX instances_uuid_deleted_idx
+ ON instances (uuid, deleted);
+
+ CREATE INDEX instances_task_state_updated_at_idx
+ ON instances (task_state, updated_at);
+
+ INSERT INTO instances
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ hostname,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances_backup;
+
+ DROP TABLE instances_backup;
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql
new file mode 100644
index 000000000..9a72d06c6
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql
@@ -0,0 +1,239 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instances_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ server_name VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instances_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ server_name,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances;
+
+ DROP TABLE instances;
+
+ CREATE TABLE instances (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ CREATE INDEX instances_host_node_deleted_idx
+ ON instances (host, node, deleted);
+
+ CREATE INDEX instances_host_deleted_idx
+ ON instances (host, deleted);
+
+ CREATE INDEX instances_reservation_id_idx
+ ON instances (reservation_id);
+
+ CREATE INDEX instances_terminated_at_launched_at_idx
+ ON instances (terminated_at, launched_at);
+
+ CREATE INDEX instances_uuid_deleted_idx
+ ON instances (uuid, deleted);
+
+ CREATE INDEX instances_task_state_updated_at_idx
+ ON instances (task_state, updated_at);
+
+ INSERT INTO instances
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances_backup;
+
+ DROP TABLE instances_backup;
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
index 0e819a59d..8a471c057 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
@@ -23,10 +23,12 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on aggregate_metadata_get_item
+ t = Table('fixed_ips', meta, autoload=True)
+
+ # Based on fixed_ip_delete_associate
# from: nova/db/sqlalchemy/api.py
- t = Table('aggregate_metadata', meta, autoload=True)
- i = Index('aggregate_metadata_key_idx', t.c.key)
+ i = Index('fixed_ips_deleted_allocated_idx',
+ t.c.address, t.c.deleted, t.c.allocated)
try:
i.create(migrate_engine)
except IntegrityError:
@@ -37,6 +39,8 @@ def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('aggregate_metadata', meta, autoload=True)
- i = Index('aggregate_metadata_key_idx', t.c.key)
+ t = Table('fixed_ips', meta, autoload=True)
+
+ i = Index('fixed_ips_deleted_allocated_idx',
+ t.c.address, t.c.deleted, t.c.allocated)
i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py b/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
new file mode 100644
index 000000000..18aa206fe
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # NOTE(dprince): Remove unused snapshots/volumes sequences.
+ # These are leftovers from the ID --> UUID conversion for these tables
+ # that occurred in Folsom.
+ if migrate_engine.name == "postgresql":
+ base_query = """SELECT COUNT(*) FROM pg_class c
+ WHERE c.relkind = 'S'
+ AND relname = '%s';"""
+ result = migrate_engine.execute(base_query % "snapshots_id_seq")
+ if result.scalar() > 0:
+ sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
+ migrate_engine.execute(sql)
+
+ result = migrate_engine.execute(base_query % "volumes_id_seq")
+ if result.scalar() > 0:
+ sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
+ migrate_engine.execute(sql)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ if migrate_engine.name == "postgresql":
+ sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
+ SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
+ ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
+ nextval('snapshots_id_seq'::regclass);"""
+
+ sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
+ SELECT pg_catalog.setval('volumes_id_seq', 1, false);
+ ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
+ nextval('volumes_id_seq'::regclass);"""
+ migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py b/nova/db/sqlalchemy/migrate_repo/versions/141_update_migrations_instance_uuid.py
index 5e24b42c3..086435022 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/141_update_migrations_instance_uuid.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 IBM
+# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,20 +14,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Index, MetaData, Table
+from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instances = Table('fixed_ips', meta, autoload=True)
- index = Index('address', instances.c.address)
- index.create(migrate_engine)
+
+ migrations = Table('migrations', meta, autoload=True)
+ migrations.c.instance_uuid.alter(String(36))
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instances = Table('fixed_ips', meta, autoload=True)
- index = Index('address', instances.c.address)
- index.drop(migrate_engine)
+
+ migrations = Table('migrations', meta, autoload=True)
+ migrations.c.instance_uuid.alter(String(255))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py b/nova/db/sqlalchemy/migrate_repo/versions/142_add_migrations_instance_status_index.py
index 82517e53a..7aa358a25 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/142_add_migrations_instance_status_index.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 OpenStack LLC.
+# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,29 +16,25 @@
# under the License.
from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on key_pair_get
+ # Based on migration_get_by_instance_and_status
# from: nova/db/sqlalchemy/api.py
- t = Table('key_pairs', meta, autoload=True)
- i = Index('key_pair_user_id_name_idx',
- t.c.user_id, t.c.name)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
+ t = Table('migrations', meta, autoload=True)
+ i = Index('migrations_instance_uuid_and_status_idx', t.c.deleted,
+ t.c.instance_uuid, t.c.status)
+ i.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('key_pairs', meta, autoload=True)
- i = Index('key_pair_user_id_name_idx',
- t.c.user_id, t.c.name)
+ t = Table('migrations', meta, autoload=True)
+ i = Index('migrations_instance_uuid_and_status_idx', t.c.deleted,
+ t.c.instance_uuid, t.c.status)
i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py b/nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py
new file mode 100644
index 000000000..b7b867358
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py
@@ -0,0 +1,65 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table
+
+
+OLD_MYSQL_NAME = 'instance_id'
+NEW_MYSQL_NAME = 'instance_uuid'
+
+OLD_PG_NAME = 'instance_info_caches_instance_id_key'
+NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # NOTE(dprince): Rename the unique key constraints for both MySQL
+ # and PostgreSQL so they reflect the most recent UUID conversions
+ # from Folsom.
+ instance_info_caches = Table('instance_info_caches', meta, autoload=True)
+
+ if migrate_engine.name == "mysql":
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=NEW_MYSQL_NAME).create()
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=OLD_MYSQL_NAME).drop()
+ if migrate_engine.name == "postgresql":
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=NEW_PG_NAME).create()
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=OLD_PG_NAME).drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_info_caches = Table('instance_info_caches', meta, autoload=True)
+
+ if migrate_engine.name == "mysql":
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=OLD_MYSQL_NAME).create()
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=NEW_MYSQL_NAME).drop()
+ if migrate_engine.name == "postgresql":
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=OLD_PG_NAME).create()
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name=NEW_PG_NAME).drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py
new file mode 100644
index 000000000..97b0f7bb0
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/144_add_node_to_migrations.py
@@ -0,0 +1,185 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import and_, Index, String, Column, MetaData, Table
+from sqlalchemy.sql.expression import select, update
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ migrations = Table('migrations', meta, autoload=True)
+
+ # drop old index:
+ i = _old_index(migrations)
+ i.drop(migrate_engine)
+
+ # add columns. a node is the same as a compute node's
+ # hypervisor hostname:
+ source_node = Column('source_node', String(length=255))
+ migrations.create_column(source_node)
+
+ dest_node = Column('dest_node', String(length=255))
+ migrations.create_column(dest_node)
+
+ # map compute hosts => list of compute nodes
+ nodemap = _map_nodes(meta)
+
+ # update migration and instance records with nodes:
+ _update_nodes(nodemap, instances, migrations)
+
+ # add new index:
+ migrations = Table('migrations', meta, autoload=True)
+ _add_new_index(migrations, migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ migrations = Table('migrations', meta, autoload=True)
+
+ # drop new columns:
+ source_node = Column('source_node', String(length=255))
+ migrations.drop_column(source_node)
+
+ dest_node = Column('dest_node', String(length=255))
+ migrations.drop_column(dest_node)
+
+ # drop new index:
+ _drop_new_index(migrations, migrate_engine)
+
+ # re-add old index:
+ i = _old_index(migrations)
+ i.create(migrate_engine)
+
+
+def _map_nodes(meta):
+ """Map host to compute node(s) for the purpose of determining which hosts
+ are single vs multi-node.
+ """
+
+ services = Table('services', meta, autoload=True)
+ c_nodes = Table('compute_nodes', meta, autoload=True)
+
+ q = select([services.c.host, c_nodes.c.hypervisor_hostname],
+
+ whereclause=and_(c_nodes.c.deleted == False,
+ services.c.deleted == False),
+
+ from_obj=c_nodes.join(services,
+ c_nodes.c.service_id == services.c.id)
+ )
+
+ nodemap = {}
+
+ for (host, node) in q.execute():
+ nodes = nodemap.setdefault(host, [])
+ nodes.append(node)
+
+ return nodemap
+
+
+def _add_new_index(migrations, migrate_engine):
+ if migrate_engine.name == "mysql":
+ # mysql-specific index by leftmost 100 chars. (mysql gets angry if the
+ # index key length is too long.)
+ sql = ("create index migrations_by_host_nodes_and_status_idx ON "
+ "migrations (deleted, source_compute(100), dest_compute(100), "
+ "source_node(100), dest_node(100), status)")
+ migrate_engine.execute(sql)
+
+ else:
+ i = Index('migrations_by_host_nodes_and_status_idx',
+ migrations.c.deleted, migrations.c.source_compute,
+ migrations.c.dest_compute, migrations.c.source_node,
+ migrations.c.dest_node, migrations.c.status)
+ i.create(migrate_engine)
+
+
+def _drop_new_index(migrations, migrate_engine):
+ if migrate_engine.name == "mysql":
+ sql = ("drop index migrations_by_host_nodes_and_status_idx on "
+ "migrations")
+ migrate_engine.execute(sql)
+
+ else:
+ i = Index('migrations_by_host_nodes_and_status_idx',
+ migrations.c.deleted, migrations.c.source_compute,
+ migrations.c.dest_compute, migrations.c.source_node,
+ migrations.c.dest_node, migrations.c.status)
+ i.drop(migrate_engine)
+
+
+def _old_index(migrations):
+ i = Index('migrations_by_host_and_status_idx', migrations.c.deleted,
+ migrations.c.source_compute, migrations.c.dest_compute,
+ migrations.c.status)
+ return i
+
+
+def _update_nodes(nodemap, instances, migrations):
+ """For each migration and matching instance record, update the node columns
+ if the referenced host is single-node.
+
+ Skip updates for multi-node hosts. In that case, there's no way to
+ determine which node on a host the record should be associated with.
+ """
+ q = select([migrations.c.id, migrations.c.source_compute,
+ migrations.c.dest_compute, instances.c.uuid, instances.c.host,
+ instances.c.node],
+
+ whereclause=and_(migrations.c.source_compute != None,
+ migrations.c.dest_compute != None,
+ instances.c.deleted == False,
+ migrations.c.status != 'reverted',
+ migrations.c.status != 'error'),
+
+ from_obj=migrations.join(instances,
+ migrations.c.instance_uuid == instances.c.uuid)
+ )
+
+ result = q.execute()
+ for migration_id, src, dest, uuid, instance_host, instance_node in result:
+
+ values = {}
+
+ nodes = nodemap.get(src, [])
+
+ if len(nodes) == 1:
+ # the source host is a single-node, safe to update node
+ node = nodes[0]
+ values['source_node'] = node
+
+ if src == instance_host and node != instance_node:
+ update(instances).where(instances.c.uuid == uuid).\
+ values(node=node)
+
+ nodes = nodemap.get(dest, [])
+ if len(nodes) == 1:
+ # the dest host is a single-node, safe to update node
+ node = nodes[0]
+ values['dest_node'] = node
+
+ if dest == instance_host and node != instance_node:
+ update(instances).where(instances.c.uuid == uuid).\
+ values(node=node)
+
+ if values:
+ q = update(migrations,
+ values=values,
+ whereclause=migrations.c.id == migration_id)
+ q.execute()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
new file mode 100644
index 000000000..7adbcb938
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
@@ -0,0 +1,69 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime
+from sqlalchemy import Boolean, BigInteger, MetaData, Integer, String, Table
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # Create new table
+ volume_usage_cache = Table('volume_usage_cache', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id', String(36), nullable=False),
+ Column("instance_id", Integer()),
+ Column('tot_last_refreshed', DateTime(timezone=False)),
+ Column('tot_reads', BigInteger(), default=0),
+ Column('tot_read_bytes', BigInteger(), default=0),
+ Column('tot_writes', BigInteger(), default=0),
+ Column('tot_write_bytes', BigInteger(), default=0),
+ Column('curr_last_refreshed', DateTime(timezone=False)),
+ Column('curr_reads', BigInteger(), default=0),
+ Column('curr_read_bytes', BigInteger(), default=0),
+ Column('curr_writes', BigInteger(), default=0),
+ Column('curr_write_bytes', BigInteger(), default=0),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ try:
+ volume_usage_cache.create()
+ except Exception:
+ LOG.exception("Exception while creating table 'volume_usage_cache'")
+ meta.drop_all(tables=[volume_usage_cache])
+ raise
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volume_usage_cache = Table('volume_usage_cache', meta, autoload=True)
+ try:
+ volume_usage_cache.drop()
+ except Exception:
+ LOG.error(_("volume_usage_cache table not dropped"))
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
index df4a83843..04f31ce5f 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/146_aggregate_zones.py
@@ -13,7 +13,6 @@
# under the License.
from sqlalchemy import String, Column, MetaData, Table, delete, select
-from migrate.changeset import UniqueConstraint
from nova.openstack.common import log as logging
@@ -24,10 +23,9 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
aggregates = Table('aggregates', meta, autoload=True)
aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
+ # migrate data
record_list = list(aggregates.select().execute())
for rec in record_list:
row = aggregate_metadata.insert()
@@ -35,23 +33,11 @@ def upgrade(migrate_engine):
'updated_at': rec['updated_at'],
'deleted_at': rec['deleted_at'],
'deleted': rec['deleted'],
- 'key': 'operational_state',
- 'value': rec['operational_state'],
+ 'key': 'availability_zone',
+ 'value': rec['availability_zone'],
'aggregate_id': rec['id'],
})
- aggregates.drop_column('operational_state')
-
- aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
- if dialect.startswith('sqlite'):
- aggregate_hosts.c.host.alter(unique=False)
- elif dialect.startswith('postgres'):
- ucon = UniqueConstraint('host',
- name='aggregate_hosts_host_key',
- table=aggregate_hosts)
- ucon.drop()
- else:
- col = aggregate_hosts.c.host
- UniqueConstraint(col, name='host').drop()
+ aggregates.drop_column('availability_zone')
def downgrade(migrate_engine):
@@ -60,13 +46,12 @@ def downgrade(migrate_engine):
aggregates = Table('aggregates', meta, autoload=True)
aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
- operational_state = Column('operational_state', String(255))
- aggregates.create_column(operational_state)
- aggregates.update().values(operational_state=select(
+ availability_zone = Column('availability_zone', String(255))
+ aggregates.create_column(availability_zone)
+ # migrate data
+ aggregates.update().values(availability_zone=select(
[aggregate_metadata.c.value]).where(aggregates.c.id ==
- aggregate_metadata.c.aggregate_id and aggregate_metadata.c.key ==
- 'operational_state')).execute()
- delete(aggregate_metadata, aggregate_metadata.c.key == 'operational_state')
- aggregates.c.operational_state.alter(nullable=False)
- aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
- aggregate_hosts.c.host.alter(unique=True)
+ aggregate_metadata.c.aggregate_id).where(aggregate_metadata.c.key ==
+ 'availability_zone')).execute()
+ delete(aggregate_metadata, aggregate_metadata.c.key == 'availability_zone')
+ aggregates.c.availability_zone.alter(nullable=False)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
new file mode 100644
index 000000000..a20799fbe
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
@@ -0,0 +1,83 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import String, Column, MetaData, Table, select
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+""" Remove availability_zone column from services model and replace with
+ aggregate based zone."""
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ services = Table('services', meta, autoload=True)
+ aggregates = Table('aggregates', meta, autoload=True)
+ aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
+ # migrate data
+ record_list = list(services.select().execute())
+ for rec in record_list:
+ # Only need to migrate nova-compute availability_zones
+ if rec['binary'] != 'nova-compute':
+ continue
+ # if zone doesn't exist create
+        result = aggregate_metadata.select().where(aggregate_metadata.c.key ==
+            'availability_zone').where(
+            aggregate_metadata.c.value == rec['availability_zone']).execute()
+ result = [r for r in result]
+ if len(result) > 0:
+ agg_id = result[0].aggregate_id
+ else:
+ agg = aggregates.insert()
+ result = agg.execute({'name': rec['availability_zone']})
+ agg_id = result.inserted_primary_key[0]
+ row = aggregate_metadata.insert()
+ row.execute({'created_at': rec['created_at'],
+ 'updated_at': rec['updated_at'],
+ 'deleted_at': rec['deleted_at'],
+ 'deleted': rec['deleted'],
+ 'key': 'availability_zone',
+ 'value': rec['availability_zone'],
+ 'aggregate_id': agg_id,
+ })
+ # add host to zone
+ agg_hosts = Table('aggregate_hosts', meta, autoload=True)
+ row = agg_hosts.insert()
+ row.execute({'host': rec['host'], 'aggregate_id': agg_id})
+
+ services.drop_column('availability_zone')
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ services = Table('services', meta, autoload=True)
+ aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
+ agg_hosts = Table('aggregate_hosts', meta, autoload=True)
+ availability_zone = Column('availability_zone', String(255),
+ default='nova')
+ services.create_column(availability_zone)
+ # migrate data
+ services.update().values(availability_zone=select(
+ [aggregate_metadata.c.value]).
+ where(agg_hosts.c.aggregate_id == aggregate_metadata.c.aggregate_id).
+ where(aggregate_metadata.c.key == 'availability_zone').
+ where(agg_hosts.c.host == services.c.host).
+ where(services.c.binary == 'nova-compute')).execute()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
new file mode 100644
index 000000000..6adfb1dc1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import DateTime
+from sqlalchemy import ForeignKey
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_actions = Table('instance_actions', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('action', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('request_id', String(length=255)),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('message', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ instance_actions_events = Table('instance_actions_events', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('event', String(length=255)),
+ Column('action_id', Integer, ForeignKey('instance_actions.id')),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('result', String(length=255)),
+ Column('traceback', Text),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ try:
+ instance_actions.create()
+ instance_actions_events.create()
+ except Exception:
+ LOG.exception("Exception while creating table 'instance_actions' or "
+ "'instance_actions_events'")
+ meta.drop_all(tables=[instance_actions, instance_actions_events])
+ raise
+
+ Index('instance_uuid_idx',
+ instance_actions.c.instance_uuid).create(migrate_engine)
+ Index('request_id_idx',
+ instance_actions.c.request_id).create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ try:
+ instance_actions = Table('instance_actions', meta, autoload=True)
+ instance_actions.drop()
+ except Exception:
+ LOG.exception("Exception dropping table 'instance_actions'")
+
+ try:
+ instance_actions_events = Table('instance_actions_events', meta,
+ autoload=True)
+ instance_actions_events.drop()
+ except Exception:
+        LOG.exception("Exception dropping table 'instance_actions_events'")
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index c6c9e9b5f..dbc1ed432 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -22,7 +22,6 @@ import os
from nova.db import migration
from nova.db.sqlalchemy.session import get_engine
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
@@ -61,8 +60,6 @@ from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
-FLAGS = flags.FLAGS
-
_REPOSITORY = None
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 8022bad12..52985a3eb 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -22,18 +22,16 @@ SQLAlchemy models for nova data.
"""
from sqlalchemy import Column, Integer, BigInteger, String, schema
-from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from nova.db.sqlalchemy.session import get_session
-from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
BASE = declarative_base()
@@ -50,17 +48,20 @@ class NovaBase(object):
"""Save this object."""
if not session:
session = get_session()
- session.add(self)
- try:
+ # NOTE(boris-42): This part of code should be look like:
+        # NOTE(boris-42): This part of code should be look like:
+        #                       session.add(self)
+ # session.flush()
+ # But there is a bug in sqlalchemy and eventlet that
+ # raises NoneType exception if there is no running
+ # transaction and rollback is called. As long as
+ # sqlalchemy has this bug we have to create transaction
+        #                   explicitly.
+ with session.begin(subtransactions=True):
+ session.add(self)
session.flush()
- except IntegrityError, e:
- if str(e).endswith('is not unique'):
- raise exception.Duplicate(str(e))
- else:
- raise
-
- def delete(self, session=None):
- """Delete this object."""
+
+ def soft_delete(self, session=None):
+ """Mark this object as deleted."""
self.deleted = True
self.deleted_at = timeutils.utcnow()
self.save(session=session)
@@ -89,7 +90,7 @@ class NovaBase(object):
return n, getattr(self, n)
def update(self, values):
- """Make the model object behave like a dict"""
+ """Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
@@ -114,7 +115,6 @@ class Service(BASE, NovaBase):
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
- availability_zone = Column(String(255), default='nova')
class ComputeNode(BASE, NovaBase):
@@ -181,7 +181,7 @@ class ComputeNodeStat(BASE, NovaBase):
class Certificate(BASE, NovaBase):
- """Represents a x509 certificate"""
+ """Represents a x509 certificate."""
__tablename__ = 'certificates'
id = Column(Integer, primary_key=True)
@@ -200,7 +200,7 @@ class Instance(BASE, NovaBase):
@property
def name(self):
try:
- base_name = FLAGS.instance_name_template % self.id
+ base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
@@ -214,7 +214,7 @@ class Instance(BASE, NovaBase):
continue
info[key] = self[key]
try:
- base_name = FLAGS.instance_name_template % info
+ base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@@ -228,7 +228,7 @@ class Instance(BASE, NovaBase):
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
- server_name = Column(String(255))
+ hostname = Column(String(255))
# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
@@ -249,8 +249,12 @@ class Instance(BASE, NovaBase):
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
- hostname = Column(String(255))
+ # This is not related to hostname, above. It refers
+ # to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ # To identify the "ComputeNode" which the instance resides in.
+ # This equals to ComputeNode.hypervisor_hostname.
+ node = Column(String(255))
# *not* flavor_id
instance_type_id = Column(Integer)
@@ -302,6 +306,10 @@ class Instance(BASE, NovaBase):
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False, nullable=False)
+ # OpenStack compute cell name. This will only be set at the top of
+ # the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
+ cell_name = Column(String(255))
+
class InstanceInfoCache(BASE, NovaBase):
"""
@@ -322,7 +330,7 @@ class InstanceInfoCache(BASE, NovaBase):
class InstanceTypes(BASE, NovaBase):
- """Represent possible instance_types or flavor of VM offered"""
+ """Represent possible instance_types or flavor of VM offered."""
__tablename__ = "instance_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
@@ -352,7 +360,7 @@ class Volume(BASE, NovaBase):
@property
def name(self):
- return FLAGS.volume_name_template % self.id
+ return CONF.volume_name_template % self.id
ec2_id = Column(Integer)
user_id = Column(String(255))
@@ -382,49 +390,6 @@ class Volume(BASE, NovaBase):
volume_type_id = Column(Integer)
-class VolumeMetadata(BASE, NovaBase):
- """Represents a metadata key/value pair for a volume"""
- __tablename__ = 'volume_metadata'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
- volume = relationship(Volume, backref="volume_metadata",
- foreign_keys=volume_id,
- primaryjoin='and_('
- 'VolumeMetadata.volume_id == Volume.id,'
- 'VolumeMetadata.deleted == False)')
-
-
-class VolumeTypes(BASE, NovaBase):
- """Represent possible volume_types of volumes offered"""
- __tablename__ = "volume_types"
- id = Column(Integer, primary_key=True)
- name = Column(String(255))
-
- volumes = relationship(Volume,
- backref=backref('volume_type', uselist=False),
- foreign_keys=id,
- primaryjoin='and_('
- 'Volume.volume_type_id == VolumeTypes.id, '
- 'VolumeTypes.deleted == False)')
-
-
-class VolumeTypeExtraSpecs(BASE, NovaBase):
- """Represents additional specs as key/value pairs for a volume_type"""
- __tablename__ = 'volume_type_extra_specs'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
- nullable=False)
- volume_type = relationship(VolumeTypes, backref="extra_specs",
- foreign_keys=volume_type_id,
- primaryjoin='and_('
- 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
- 'VolumeTypeExtraSpecs.deleted == False)')
-
-
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
@@ -509,11 +474,11 @@ class Snapshot(BASE, NovaBase):
@property
def name(self):
- return FLAGS.snapshot_name_template % self.id
+ return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
- return FLAGS.volume_name_template % self.volume_id
+ return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
@@ -528,7 +493,7 @@ class Snapshot(BASE, NovaBase):
class BlockDeviceMapping(BASE, NovaBase):
- """Represents block device mapping that is defined by EC2"""
+ """Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
id = Column(Integer, primary_key=True, autoincrement=True)
@@ -565,7 +530,7 @@ class BlockDeviceMapping(BASE, NovaBase):
class IscsiTarget(BASE, NovaBase):
- """Represents an iscsi target for a given host"""
+ """Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"), )
id = Column(Integer, primary_key=True)
@@ -670,15 +635,23 @@ class Migration(BASE, NovaBase):
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
+    # nodes are equivalent to a compute node's 'hypervisor_hostname'
+ source_node = Column(String(255))
+ dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
- instance_uuid = Column(String(255), ForeignKey('instances.uuid'),
+ instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=True)
#TODO(_cerberus_): enum
status = Column(String(255))
+ instance = relationship("Instance", foreign_keys=instance_uuid,
+ primaryjoin='and_(Migration.instance_uuid == '
+ 'Instance.uuid, Instance.deleted == '
+ 'False)')
+
class Network(BASE, NovaBase):
"""Represents a network."""
@@ -793,7 +766,7 @@ class Console(BASE, NovaBase):
class InstanceMetadata(BASE, NovaBase):
- """Represents a user-provided metadata key/value pair for an instance"""
+ """Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -809,7 +782,7 @@ class InstanceMetadata(BASE, NovaBase):
class InstanceSystemMetadata(BASE, NovaBase):
- """Represents a system-owned metadata key/value pair for an instance"""
+ """Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -826,7 +799,7 @@ class InstanceSystemMetadata(BASE, NovaBase):
class InstanceTypeProjects(BASE, NovaBase):
- """Represent projects associated instance_types"""
+ """Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
@@ -841,7 +814,7 @@ class InstanceTypeProjects(BASE, NovaBase):
class InstanceTypeExtraSpecs(BASE, NovaBase):
- """Represents additional specs as key/value pairs for an instance_type"""
+ """Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -855,6 +828,30 @@ class InstanceTypeExtraSpecs(BASE, NovaBase):
'InstanceTypeExtraSpecs.deleted == False)')
+class Cell(BASE, NovaBase):
+ """Represents parent and child cells of this cell. Cells can
+ have multiple parents and children, so there could be any number
+ of entries with is_parent=True or False
+ """
+ __tablename__ = 'cells'
+ id = Column(Integer, primary_key=True)
+ # Name here is the 'short name' of a cell. For instance: 'child1'
+ name = Column(String(255))
+ api_url = Column(String(255))
+ # FIXME(comstud): username and password refer to the credentials
+ # used for talking with the AMQP server within a particular cell.
+ # This table needs cleanup to support more generic cells
+ # communication (including via 0mq, for instance)
+ username = Column(String(255))
+ password = Column(String(255))
+ weight_offset = Column(Float(), default=0.0)
+ weight_scale = Column(Float(), default=1.0)
+ is_parent = Column(Boolean())
+ rpc_host = Column(String(255))
+ rpc_port = Column(Integer())
+ rpc_virtual_host = Column(String(255))
+
+
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
@@ -877,7 +874,6 @@ class Aggregate(BASE, NovaBase):
__tablename__ = 'aggregates'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
- availability_zone = Column(String(255), nullable=False)
_hosts = relationship(AggregateHost,
lazy="joined",
secondary="aggregate_hosts",
@@ -903,6 +899,9 @@ class Aggregate(BASE, NovaBase):
'Aggregate.deleted == False)',
backref='aggregates')
+ def _extra_keys(self):
+ return ['hosts', 'metadetails', 'availability_zone']
+
@property
def hosts(self):
return [h.host for h in self._hosts]
@@ -911,6 +910,12 @@ class Aggregate(BASE, NovaBase):
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
+ @property
+ def availability_zone(self):
+ if 'availability_zone' not in self.metadetails:
+ return None
+ return self.metadetails['availability_zone']
+
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
@@ -925,7 +930,7 @@ class AgentBuild(BASE, NovaBase):
class BandwidthUsage(BASE, NovaBase):
- """Cache for instance bandwidth usage data pulled from the hypervisor"""
+ """Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
@@ -938,53 +943,45 @@ class BandwidthUsage(BASE, NovaBase):
last_ctr_out = Column(BigInteger)
+class VolumeUsage(BASE, NovaBase):
+ """Cache for volume usage data pulled from the hypervisor."""
+ __tablename__ = 'volume_usage_cache'
+ id = Column(Integer, primary_key=True, nullable=False)
+ volume_id = Column(String(36), nullable=False)
+ instance_id = Column(Integer)
+ tot_last_refreshed = Column(DateTime)
+ tot_reads = Column(BigInteger, default=0)
+ tot_read_bytes = Column(BigInteger, default=0)
+ tot_writes = Column(BigInteger, default=0)
+ tot_write_bytes = Column(BigInteger, default=0)
+ curr_last_refreshed = Column(DateTime)
+ curr_reads = Column(BigInteger, default=0)
+ curr_read_bytes = Column(BigInteger, default=0)
+ curr_writes = Column(BigInteger, default=0)
+ curr_write_bytes = Column(BigInteger, default=0)
+
+
class S3Image(BASE, NovaBase):
- """Compatibility layer for the S3 image service talking to Glance"""
+ """Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
- """Compatibility layer for the EC2 volume service"""
+ """Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
- """Compatibility layer for the EC2 snapshot service"""
+ """Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
-class SMFlavors(BASE, NovaBase):
- """Represents a flavor for SM volumes."""
- __tablename__ = 'sm_flavors'
- id = Column(Integer(), primary_key=True)
- label = Column(String(255))
- description = Column(String(255))
-
-
-class SMBackendConf(BASE, NovaBase):
- """Represents the connection to the backend for SM."""
- __tablename__ = 'sm_backend_config'
- id = Column(Integer(), primary_key=True)
- flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
- sr_uuid = Column(String(255))
- sr_type = Column(String(255))
- config_params = Column(String(2047))
-
-
-class SMVolume(BASE, NovaBase):
- __tablename__ = 'sm_volume'
- id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
- backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
- nullable=False)
- vdi_uuid = Column(String(255))
-
-
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
id = Column(Integer(), primary_key=True, autoincrement=True)
@@ -996,15 +993,44 @@ class InstanceFault(BASE, NovaBase):
details = Column(Text)
+class InstanceAction(BASE, NovaBase):
+ """Track client actions on an instance."""
+ __tablename__ = 'instance_actions'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ action = Column(String(255))
+ instance_uuid = Column(String(36),
+ ForeignKey('instances.uuid'),
+ nullable=False)
+ request_id = Column(String(255))
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ message = Column(String(255))
+
+
+class InstanceActionEvent(BASE, NovaBase):
+ """Track events that occur during an InstanceAction."""
+ __tablename__ = 'instance_actions_events'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ event = Column(String(255))
+ action_id = Column(Integer, ForeignKey('instance_actions.id'),
+ nullable=False)
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ result = Column(String(255))
+ traceback = Column(Text)
+
+
class InstanceIdMapping(BASE, NovaBase):
- """Compatability layer for the EC2 instance service"""
+ """Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
- """Audit log for background periodic tasks"""
+ """Audit log for background periodic tasks."""
__tablename__ = 'task_log'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 184d279ae..9c896ae97 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -158,23 +158,139 @@ There are some things which it is best to avoid:
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
+
+Efficient use of soft deletes:
+
+* There are two possible ways to mark a record as deleted:
+ model.soft_delete() and query.soft_delete().
+
+ model.soft_delete() method works with single already fetched entry.
+ query.soft_delete() makes only one db request for all entries that correspond
+ to query.
+
+* In almost all cases you should use query.soft_delete(). Some examples:
+
+ def soft_delete_bar():
+ count = model_query(BarModel).find(some_condition).soft_delete()
+ if count == 0:
+ raise Exception("0 entries were soft deleted")
+
+ def complex_soft_delete_with_synchronization_bar(session=None):
+ if session is None:
+ session = get_session()
+ with session.begin(subtransactions=True):
+ count = model_query(BarModel).\
+ find(some_condition).\
+ soft_delete(synchronize_session=True)
+ # Here synchronize_session is required, because we
+ # don't know what is going on in outer session.
+ if count == 0:
+ raise Exception("0 entries were soft deleted")
+
+* There is only one situation where model.soft_delete() is appropriate: when
+ you fetch a single record, work with it, and mark it as deleted in the same
+ transaction.
+
+ def soft_delete_bar_model():
+ session = get_session()
+ with session.begin():
+ bar_ref = model_query(BarModel).find(some_condition).first()
+ # Work with bar_ref
+ bar_ref.soft_delete(session=session)
+
+ However, if you need to work with all entries that correspond to query and
+ then soft delete them you should use query.soft_delete() method:
+
+ def soft_delete_multi_models():
+ session = get_session()
+ with session.begin():
+ query = model_query(BarModel, session=session).\
+ find(some_condition)
+ model_refs = query.all()
+ # Work with model_refs
+ query.soft_delete(synchronize_session=False)
+ # synchronize_session=False should be set if there is no outer
+ # session and these entries are not used after this.
+
+ When working with many rows, it is very important to use query.soft_delete,
+ which issues a single query. Using model.soft_delete(), as in the following
+ example, is very inefficient.
+
+ for bar_ref in bar_refs:
+ bar_ref.soft_delete(session=session)
+ # This will produce count(bar_refs) db requests.
"""
import re
import time
+from eventlet import db_pool
from eventlet import greenthread
-from sqlalchemy.exc import DisconnectionError, OperationalError
+try:
+ import MySQLdb
+except ImportError:
+ MySQLdb = None
+from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
+from sqlalchemy.sql.expression import literal_column
-import nova.exception
-import nova.flags as flags
+from nova.exception import DBDuplicateEntry
+from nova.exception import DBError
+from nova.exception import InvalidUnicodeParameter
+from nova.openstack.common import cfg
import nova.openstack.common.log as logging
-
-
-FLAGS = flags.FLAGS
+from nova.openstack.common import timeutils
+from nova import paths
+
+
+sql_opts = [
+ cfg.StrOpt('sql_connection',
+ default='sqlite:///' + paths.state_path_def('$sqlite_db'),
+ help='The SQLAlchemy connection string used to connect to the '
+ 'database'),
+ cfg.StrOpt('sqlite_db',
+ default='nova.sqlite',
+ help='the filename to use with sqlite'),
+ cfg.IntOpt('sql_idle_timeout',
+ default=3600,
+ help='timeout before idle sql connections are reaped'),
+ cfg.BoolOpt('sqlite_synchronous',
+ default=True,
+ help='If passed, use synchronous mode for sqlite'),
+ cfg.IntOpt('sql_min_pool_size',
+ default=1,
+ help='Minimum number of SQL connections to keep open in a '
+ 'pool'),
+ cfg.IntOpt('sql_max_pool_size',
+ default=5,
+ help='Maximum number of SQL connections to keep open in a '
+ 'pool'),
+ cfg.IntOpt('sql_max_retries',
+ default=10,
+ help='maximum db connection retries during startup. '
+ '(setting -1 implies an infinite retry count)'),
+ cfg.IntOpt('sql_retry_interval',
+ default=10,
+ help='interval between retries of opening a sql connection'),
+ cfg.IntOpt('sql_max_overflow',
+ default=None,
+ help='If set, use this value for max_overflow with sqlalchemy'),
+ cfg.IntOpt('sql_connection_debug',
+ default=0,
+ help='Verbosity of SQL debugging information. 0=None, '
+ '100=Everything'),
+ cfg.BoolOpt('sql_connection_trace',
+ default=False,
+ help='Add python stack traces to SQL as comment strings'),
+ cfg.BoolOpt('sql_dbpool_enable',
+ default=False,
+ help="enable the use of eventlet's db_pool for MySQL"),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(sql_opts)
LOG = logging.getLogger(__name__)
_ENGINE = None
@@ -190,27 +306,99 @@ def get_session(autocommit=True, expire_on_commit=False):
_MAKER = get_maker(engine, autocommit, expire_on_commit)
session = _MAKER()
- session = wrap_session(session)
return session
-def wrap_session(session):
- """Return a session whose exceptions are wrapped."""
- session.query = nova.exception.wrap_db_error(session.query)
- session.flush = nova.exception.wrap_db_error(session.flush)
- return session
+# note(boris-42): In current versions of DB backends unique constraint
+# violation messages follow the structure:
+#
+# sqlite:
+# 1 column - (IntegrityError) column c1 is not unique
+# N columns - (IntegrityError) column c1, c2, ..., N are not unique
+#
+# postgres:
+# 1 column - (IntegrityError) duplicate key value violates unique
+# constraint "users_c1_key"
+# N columns - (IntegrityError) duplicate key value violates unique
+# constraint "name_of_our_constraint"
+#
+# mysql:
+# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
+# 'c1'")
+# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
+# with -' for key 'name_of_our_constraint'")
+_RE_DB = {
+ "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+ "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
+ "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
+}
+
+
+def raise_if_duplicate_entry_error(integrity_error, engine_name):
+ """
+ In this function will be raised DBDuplicateEntry exception if integrity
+ error wrap unique constraint violation.
+ """
+
+ def get_columns_from_uniq_cons_or_name(columns):
+ # note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
+ # means that columns c1, c2, c3 are in UniqueConstraint.
+ uniqbase = "uniq_"
+ if not columns.startswith(uniqbase):
+ if engine_name == "postgresql":
+ return [columns[columns.index("_") + 1:columns.rindex("_")]]
+ return [columns]
+ return columns[len(uniqbase):].split("_x_")
+
+ if engine_name not in ["mysql", "sqlite", "postgresql"]:
+ return
+
+ m = _RE_DB[engine_name].match(integrity_error.message)
+ if not m:
+ return
+ columns = m.group(1)
+
+ if engine_name == "sqlite":
+ columns = columns.strip().split(", ")
+ else:
+ columns = get_columns_from_uniq_cons_or_name(columns)
+ raise DBDuplicateEntry(columns, integrity_error)
+
+
+def wrap_db_error(f):
+ def _wrap(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except UnicodeEncodeError:
+ raise InvalidUnicodeParameter()
+ # note(boris-42): We should catch unique constraint violation and
+ # wrap it by our own DBDuplicateEntry exception. Unique constraint
+ # violation is wrapped by IntegrityError.
+ except IntegrityError, e:
+ # note(boris-42): SqlAlchemy doesn't unify errors from different
+ # DBs so we must do this. Also in some tables (for example
+ # instance_types) there are more than one unique constraint. This
+ # means we should get names of columns, which values violate
+ # unique constraint, from error message.
+ raise_if_duplicate_entry_error(e, get_engine().name)
+ raise DBError(e)
+ except Exception, e:
+ LOG.exception(_('DB exception wrapped.'))
+ raise DBError(e)
+ _wrap.func_name = f.func_name
+ return _wrap
def get_engine():
"""Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None:
- _ENGINE = create_engine(FLAGS.sql_connection)
+ _ENGINE = create_engine(CONF.sql_connection)
return _ENGINE
def synchronous_switch_listener(dbapi_conn, connection_rec):
- """Switch sqlite connections to non-synchronous mode"""
+ """Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -245,7 +433,7 @@ def ping_listener(dbapi_conn, connection_rec, connection_proxy):
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError, ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
- LOG.warn('Got mysql server has gone away: %s', ex)
+ LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise DisconnectionError("Database server went away")
else:
raise
@@ -267,23 +455,42 @@ def create_engine(sql_connection):
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
- "pool_recycle": FLAGS.sql_idle_timeout,
+ "pool_recycle": CONF.sql_idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
- if FLAGS.sql_connection_debug >= 100:
+ if CONF.sql_connection_debug >= 100:
engine_args['echo'] = 'debug'
- elif FLAGS.sql_connection_debug >= 50:
+ elif CONF.sql_connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
engine_args["poolclass"] = NullPool
- if FLAGS.sql_connection == "sqlite://":
+ if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
+ elif all((CONF.sql_dbpool_enable, MySQLdb,
+ "mysql" in connection_dict.drivername)):
+ LOG.info(_("Using mysql/eventlet db_pool."))
+ # MySQLdb won't accept 'None' in the password field
+ password = connection_dict.password or ''
+ pool_args = {
+ 'db': connection_dict.database,
+ 'passwd': password,
+ 'host': connection_dict.host,
+ 'user': connection_dict.username,
+ 'min_size': CONF.sql_min_pool_size,
+ 'max_size': CONF.sql_max_pool_size,
+ 'max_idle': CONF.sql_idle_timeout}
+ creator = db_pool.ConnectionPool(MySQLdb, **pool_args)
+ engine_args['creator'] = creator.create
+ else:
+ engine_args['pool_size'] = CONF.sql_max_pool_size
+ if CONF.sql_max_overflow is not None:
+ engine_args['max_overflow'] = CONF.sql_max_overflow
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
@@ -292,16 +499,14 @@ def create_engine(sql_connection):
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', ping_listener)
elif 'sqlite' in connection_dict.drivername:
- if not FLAGS.sqlite_synchronous:
+ if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', add_regexp_listener)
- if (FLAGS.sql_connection_trace and
+ if (CONF.sql_connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
- import MySQLdb.cursors
- _do_query = debug_mysql_do_query()
- setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+ patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
@@ -309,7 +514,7 @@ def create_engine(sql_connection):
if not is_db_connection_error(e.args[0]):
raise
- remaining = FLAGS.sql_max_retries
+ remaining = CONF.sql_max_retries
if remaining == -1:
remaining = 'infinite'
while True:
@@ -317,7 +522,7 @@ def create_engine(sql_connection):
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
- time.sleep(FLAGS.sql_retry_interval)
+ time.sleep(CONF.sql_retry_interval)
try:
engine.connect()
break
@@ -328,15 +533,39 @@ def create_engine(sql_connection):
return engine
+class Query(sqlalchemy.orm.query.Query):
+ """Subclass of sqlalchemy.query with soft_delete() method."""
+ def soft_delete(self, synchronize_session='evaluate'):
+ return self.update({'deleted': True,
+ 'updated_at': literal_column('updated_at'),
+ 'deleted_at': timeutils.utcnow()},
+ synchronize_session=synchronize_session)
+
+
+class Session(sqlalchemy.orm.session.Session):
+ """Custom Session class to avoid SqlAlchemy Session monkey patching."""
+ @wrap_db_error
+ def query(self, *args, **kwargs):
+ return super(Session, self).query(*args, **kwargs)
+
+ @wrap_db_error
+ def flush(self, *args, **kwargs):
+ return super(Session, self).flush(*args, **kwargs)
+
+
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
+ class_=Session,
autocommit=autocommit,
- expire_on_commit=expire_on_commit)
+ expire_on_commit=expire_on_commit,
+ query_cls=Query)
-def debug_mysql_do_query():
- """Return a debug version of MySQLdb.cursors._do_query"""
+def patch_mysqldb_with_stacktrace_comments():
+ """Adds current stack trace as a comment in queries by patching
+ MySQLdb.cursors.BaseCursor._do_query.
+ """
import MySQLdb.cursors
import traceback
@@ -372,5 +601,4 @@ def debug_mysql_do_query():
qq = q
old_mysql_do_query(self, qq)
- # return the new _do_query method
- return _do_query
+ setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
diff --git a/nova/exception.py b/nova/exception.py
index ee5c482a0..c1005f866 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -29,11 +29,21 @@ import itertools
import webob.exc
+from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
+exc_log_opts = [
+ cfg.BoolOpt('fatal_exception_format_errors',
+ default=False,
+ help='make exception message format errors fatal'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(exc_log_opts)
+
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
@@ -62,19 +72,6 @@ class ProcessExecutionError(IOError):
IOError.__init__(self, message)
-def wrap_db_error(f):
- def _wrap(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except UnicodeEncodeError:
- raise InvalidUnicodeParameter()
- except Exception, e:
- LOG.exception(_('DB exception wrapped.'))
- raise DBError(e)
- _wrap.func_name = f.func_name
- return _wrap
-
-
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions that may
@@ -85,9 +82,11 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
- def wrapped(*args, **kw):
+ def wrapped(self, context, *args, **kw):
+ # Don't store self or context in the payload, it now seems to
+ # contain confidential information.
try:
- return f(*args, **kw)
+ return f(self, context, *args, **kw)
except Exception, e:
with excutils.save_and_reraise_exception():
if notifier:
@@ -107,10 +106,6 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# propagated.
temp_type = f.__name__
- context = get_context_from_function_and_args(f,
- args,
- kw)
-
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
@@ -150,8 +145,12 @@ class NovaException(Exception):
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
- # at least get the core message out if something happened
- message = self.message
+
+ if CONF.fatal_exception_format_errors:
+ raise e
+ else:
+ # at least get the core message out if something happened
+ message = self.message
super(NovaException, self).__init__(message)
@@ -173,6 +172,13 @@ class DBError(NovaException):
super(DBError, self).__init__(str(inner_exception))
+class DBDuplicateEntry(DBError):
+ """Wraps an implementation specific exception."""
+ def __init__(self, columns=[], inner_exception=None):
+ self.columns = columns
+ super(DBDuplicateEntry, self).__init__(inner_exception)
+
+
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text")
@@ -217,26 +223,14 @@ class Invalid(NovaException):
code = 400
-class InvalidSnapshot(Invalid):
- message = _("Invalid snapshot") + ": %(reason)s"
-
-
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
-class VolumeAttached(Invalid):
- message = _("Volume %(volume_id)s is still attached, detach volume first.")
-
-
class InvalidKeypair(Invalid):
message = _("Keypair data is invalid")
-class SfJsonEncodeFailure(NovaException):
- message = _("Failed to load data into json format")
-
-
class InvalidRequest(Invalid):
message = _("The request is invalid.")
@@ -245,10 +239,6 @@ class InvalidInput(Invalid):
message = _("Invalid input received") + ": %(reason)s"
-class InvalidVolumeType(Invalid):
- message = _("Invalid volume type") + ": %(reason)s"
-
-
class InvalidVolume(Invalid):
message = _("Invalid volume") + ": %(reason)s"
@@ -418,6 +408,10 @@ class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
+class InvalidPeriodicTaskArg(Invalid):
+ message = _("Unexpected argument for periodic task creation: %(arg)s.")
+
+
class ConstraintNotMet(NovaException):
message = _("Constraint not met.")
code = 412
@@ -428,67 +422,22 @@ class NotFound(NovaException):
code = 404
-class VirtDriverNotFound(NotFound):
- message = _("Could not find driver for compute_driver %(name)s")
-
-
-class PersistentVolumeFileNotFound(NotFound):
- message = _("Volume %(volume_id)s persistence file could not be found.")
+class AgentBuildNotFound(NotFound):
+ message = _("No agent-build associated with id %(id)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
-class SfAccountNotFound(NotFound):
- message = _("Unable to locate account %(account_name)s on "
- "Solidfire device")
-
-
-class VolumeMetadataNotFound(NotFound):
- message = _("Volume %(volume_id)s has no metadata with "
- "key %(metadata_key)s.")
-
-
-class VolumeTypeNotFound(NotFound):
- message = _("Volume type %(volume_type_id)s could not be found.")
-
-
-class VolumeTypeNotFoundByName(VolumeTypeNotFound):
- message = _("Volume type with name %(volume_type_name)s "
- "could not be found.")
-
-
-class VolumeTypeExtraSpecsNotFound(NotFound):
- message = _("Volume Type %(volume_type_id)s has no extra specs with "
- "key %(extra_specs_key)s.")
-
-
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
-class VolumeIsBusy(NovaException):
- message = _("deleting volume %(volume_name)s that has snapshot")
-
-
-class SnapshotIsBusy(NovaException):
- message = _("deleting snapshot %(snapshot_name)s that has "
- "dependent volumes")
-
-
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
-class ISCSITargetCreateFailed(NovaException):
- message = _("Failed to create iscsi target for volume %(volume_id)s.")
-
-
-class ISCSITargetRemoveFailed(NovaException):
- message = _("Failed to remove iscsi target for volume %(volume_id)s.")
-
-
class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
@@ -520,6 +469,10 @@ class StorageRepositoryNotFound(NotFound):
message = _("Cannot find SR to read/write VDI.")
+class NetworkDuplicated(NovaException):
+ message = _("Network %(network_id)s is duplicated.")
+
+
class NetworkInUse(NovaException):
message = _("Network %(network_id)s is still in use.")
@@ -557,10 +510,6 @@ class NetworkNotFoundForProject(NotFound):
"is not assigned to the project %(project_id)s.")
-class NetworkHostNotSet(NovaException):
- message = _("Host is not set to the network (%(network_id)s).")
-
-
class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
@@ -797,7 +746,7 @@ class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
class ConsoleTypeInvalid(Invalid):
- message = _("Invalid console type %(console_type)s ")
+ message = _("Invalid console type %(console_type)s")
class InstanceTypeNotFound(NotFound):
@@ -814,8 +763,40 @@ class FlavorNotFound(NotFound):
class FlavorAccessNotFound(NotFound):
- message = _("Flavor access not found for %(flavor_id) / "
- "%(project_id) combination.")
+ message = _("Flavor access not found for %(flavor_id)s / "
+ "%(project_id)s combination.")
+
+
+class CellNotFound(NotFound):
+ message = _("Cell %(cell_name)s doesn't exist.")
+
+
+class CellRoutingInconsistency(NovaException):
+ message = _("Inconsistency in cell routing: %(reason)s")
+
+
+class CellServiceAPIMethodNotFound(NotFound):
+ message = _("Service API method not found: %(detail)s")
+
+
+class CellTimeout(NotFound):
+ message = _("Timeout waiting for response from cell")
+
+
+class CellMaxHopCountReached(NovaException):
+ message = _("Cell message has reached maximum hop count: %(hop_count)s")
+
+
+class NoCellsAvailable(NovaException):
+ message = _("No cells available matching scheduling criteria.")
+
+
+class CellError(NovaException):
+ message = _("Exception received during cell processing: %(exc_name)s.")
+
+
+class InstanceUnknownCell(NotFound):
+ message = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
@@ -900,10 +881,6 @@ class FlavorAccessExists(Duplicate):
"and project %(project_id)s combination.")
-class VolumeTypeExists(Duplicate):
- message = _("Volume Type %(name)s already exists.")
-
-
class InvalidSharedStorage(NovaException):
message = _("%(path)s is not on shared storage: %(reason)s")
@@ -934,6 +911,10 @@ class CannotResizeToSameFlavor(NovaException):
message = _("When resizing, instances must change flavor!")
+class ResizeError(NovaException):
+ message = _("Resize error: %(reason)s")
+
+
class ImageTooLarge(NovaException):
message = _("Image is larger than instance type allows")
@@ -958,10 +939,6 @@ class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
-class WillNotSchedule(NovaException):
- message = _("Host %(host)s is not up or doesn't exist.")
-
-
class QuotaError(NovaException):
message = _("Quota exceeded") + ": code=%(code)s"
code = 413
@@ -974,14 +951,6 @@ class TooManyInstances(QuotaError):
" but already used %(used)d of %(allowed)d %(resource)s")
-class VolumeSizeTooLarge(QuotaError):
- message = _("Maximum volume size exceeded")
-
-
-class VolumeLimitExceeded(QuotaError):
- message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
-
-
class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
@@ -1036,32 +1005,6 @@ class AggregateHostExists(Duplicate):
message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
-class DuplicateSfVolumeNames(Duplicate):
- message = _("Detected more than one volume with name %(vol_name)s")
-
-
-class VolumeTypeCreateFailed(NovaException):
- message = _("Cannot create volume_type with "
- "name %(name)s and specs %(extra_specs)s")
-
-
-class VolumeBackendAPIException(NovaException):
- message = _("Bad or unexpected response from the storage volume "
- "backend API: %(data)s")
-
-
-class NfsException(NovaException):
- message = _("Unknown NFS exception")
-
-
-class NfsNoSharesMounted(NotFound):
- message = _("No mounted NFS shares found")
-
-
-class NfsNoSuitableShareFound(NotFound):
- message = _("There is no share which can host %(volume_size)sG")
-
-
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
@@ -1072,14 +1015,6 @@ class InstancePasswordSetFailed(NovaException):
safe = True
-class SolidFireAPIException(NovaException):
- message = _("Bad response from SolidFire API")
-
-
-class SolidFireAPIDataException(SolidFireAPIException):
- message = _("Error in SolidFire API response: data=%(data)s")
-
-
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
@@ -1137,26 +1072,18 @@ class UnexpectedTaskStateError(NovaException):
"the actual state is %(actual)s")
-class CryptoCAFileNotFound(FileNotFound):
- message = _("The CA file for %(project)s could not be found")
-
-
-class CryptoCRLFileNotFound(FileNotFound):
- message = _("The CRL file for %(project)s could not be found")
+class InstanceActionNotFound(NovaException):
+ message = _("Action for request_id %(request_id)s on instance"
+ " %(instance_uuid)s not found")
-def get_context_from_function_and_args(function, args, kwargs):
- """Find an arg of type RequestContext and return it.
+class InstanceActionEventNotFound(NovaException):
+ message = _("Event %(event)s not found for action id %(action_id)s")
- This is useful in a couple of decorators where we don't
- know much about the function we're wrapping.
- """
- # import here to avoid circularity:
- from nova import context
+class CryptoCAFileNotFound(FileNotFound):
+ message = _("The CA file for %(project)s could not be found")
- for arg in itertools.chain(kwargs.values(), args):
- if isinstance(arg, context.RequestContext):
- return arg
- return None
+class CryptoCRLFileNotFound(FileNotFound):
+ message = _("The CRL file for %(project)s could not be found")
diff --git a/nova/filters.py b/nova/filters.py
new file mode 100644
index 000000000..a3339eff8
--- /dev/null
+++ b/nova/filters.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Filter support
+"""
+
+from nova import loadables
+
+
+class BaseFilter(object):
+ """Base class for all filter classes."""
+ def _filter_one(self, obj, filter_properties):
+ """Return True if it passes the filter, False otherwise.
+ Override this in a subclass.
+ """
+ return True
+
+ def filter_all(self, filter_obj_list, filter_properties):
+ """Yield objects that pass the filter.
+
+ Can be overriden in a subclass, if you need to base filtering
+ decisions on all objects. Otherwise, one can just override
+ _filter_one() to filter a single object.
+ """
+ for obj in filter_obj_list:
+ if self._filter_one(obj, filter_properties):
+ yield obj
+
+
+class BaseFilterHandler(loadables.BaseLoader):
+ """Base class to handle loading filter classes.
+
+ This class should be subclassed where one needs to use filters.
+ """
+
+ def get_filtered_objects(self, filter_classes, objs,
+ filter_properties):
+ for filter_cls in filter_classes:
+ objs = filter_cls().filter_all(objs, filter_properties)
+ return list(objs)
diff --git a/nova/flags.py b/nova/flags.py
deleted file mode 100644
index 497d65ca7..000000000
--- a/nova/flags.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command-line flag library.
-
-Emulates gflags by wrapping cfg.ConfigOpts.
-
-The idea is to move fully to cfg eventually, and this wrapper is a
-stepping stone.
-
-"""
-
-import os
-import socket
-import sys
-
-from nova import config
-from nova.openstack.common import cfg
-
-CONF = config.CONF
-FLAGS = CONF
-
-
-def _get_my_ip():
- """
- Returns the actual ip of the local machine.
-
- This code figures out what source address would be used if some traffic
- were to be sent out to some well known address on the Internet. In this
- case, a Google DNS server is used, but the specific address does not
- matter much. No traffic is actually sent.
- """
- try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- csock.connect(('8.8.8.8', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
- except socket.error:
- return "127.0.0.1"
-
-
-core_opts = [
- cfg.StrOpt('sql_connection',
- default='sqlite:///$state_path/$sqlite_db',
- help='The SQLAlchemy connection string used to connect to the '
- 'database'),
- cfg.StrOpt('api_paste_config',
- default="api-paste.ini",
- help='File name for the paste.deploy config for nova-api'),
- cfg.StrOpt('pybasedir',
- default=os.path.abspath(os.path.join(os.path.dirname(__file__),
- '../')),
- help='Directory where the nova python module is installed'),
- cfg.StrOpt('bindir',
- default='$pybasedir/bin',
- help='Directory where nova binaries are installed'),
- cfg.StrOpt('state_path',
- default='$pybasedir',
- help="Top-level directory for maintaining nova's state"),
- ]
-
-debug_opts = [
- cfg.BoolOpt('fake_network',
- default=False,
- help='If passed, use fake network devices and addresses'),
- cfg.IntOpt('sql_connection_debug',
- default=0,
- help='Verbosity of SQL debugging information. 0=None, '
- '100=Everything'),
- cfg.BoolOpt('sql_connection_trace',
- default=False,
- help='Add python stack traces to SQL as comment strings'),
-]
-
-CONF.register_cli_opts(core_opts)
-CONF.register_cli_opts(debug_opts)
-
-global_opts = [
- cfg.StrOpt('my_ip',
- default=_get_my_ip(),
- help='ip address of this host'),
- cfg.ListOpt('region_list',
- default=[],
- help='list of region=fqdn pairs separated by commas'),
- cfg.StrOpt('aws_access_key_id',
- default='admin',
- help='AWS Access ID'),
- cfg.StrOpt('aws_secret_access_key',
- default='admin',
- help='AWS Access Key'),
- cfg.StrOpt('glance_host',
- default='$my_ip',
- help='default glance hostname or ip'),
- cfg.IntOpt('glance_port',
- default=9292,
- help='default glance port'),
- cfg.ListOpt('glance_api_servers',
- default=['$glance_host:$glance_port'],
- help='A list of the glance api servers available to nova. '
- 'Prefix with https:// for ssl-based glance api servers. '
- '([hostname|ip]:port)'),
- cfg.BoolOpt('glance_api_insecure',
- default=False,
- help='Allow to perform insecure SSL (https) requests to '
- 'glance'),
- cfg.IntOpt('glance_num_retries',
- default=0,
- help='Number retries when downloading an image from glance'),
- cfg.IntOpt('s3_port',
- default=3333,
- help='port used when accessing the s3 api'),
- cfg.StrOpt('s3_host',
- default='$my_ip',
- help='hostname or ip for openstack to use when accessing '
- 'the s3 api'),
- cfg.StrOpt('cert_topic',
- default='cert',
- help='the topic cert nodes listen on'),
- cfg.StrOpt('compute_topic',
- default='compute',
- help='the topic compute nodes listen on'),
- cfg.StrOpt('console_topic',
- default='console',
- help='the topic console proxy nodes listen on'),
- cfg.StrOpt('scheduler_topic',
- default='scheduler',
- help='the topic scheduler nodes listen on'),
- cfg.StrOpt('network_topic',
- default='network',
- help='the topic network nodes listen on'),
- cfg.BoolOpt('api_rate_limit',
- default=True,
- help='whether to rate limit the api'),
- cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute', 'metadata'],
- help='a list of APIs to enable by default'),
- cfg.StrOpt('ec2_host',
- default='$my_ip',
- help='the ip of the ec2 api server'),
- cfg.StrOpt('ec2_dmz_host',
- default='$my_ip',
- help='the internal ip of the ec2 api server'),
- cfg.IntOpt('ec2_port',
- default=8773,
- help='the port of the ec2 api server'),
- cfg.StrOpt('ec2_scheme',
- default='http',
- help='the protocol to use when connecting to the ec2 api '
- 'server (http, https)'),
- cfg.StrOpt('ec2_path',
- default='/services/Cloud',
- help='the path prefix used to call the ec2 api server'),
- cfg.ListOpt('osapi_compute_ext_list',
- default=[],
- help='Specify list of extensions to load when using osapi_'
- 'compute_extension option with nova.api.openstack.'
- 'compute.contrib.select_extensions'),
- cfg.MultiStrOpt('osapi_compute_extension',
- default=[
- 'nova.api.openstack.compute.contrib.standard_extensions'
- ],
- help='osapi compute extension to load'),
- cfg.StrOpt('osapi_path',
- default='/v1.1/',
- help='the path prefix used to call the openstack api server'),
- cfg.StrOpt('osapi_compute_link_prefix',
- default=None,
- help='Base URL that will be presented to users in links '
- 'to the OpenStack Compute API'),
- cfg.StrOpt('osapi_glance_link_prefix',
- default=None,
- help='Base URL that will be presented to users in links '
- 'to glance resources'),
- cfg.IntOpt('osapi_max_limit',
- default=1000,
- help='the maximum number of items returned in a single '
- 'response from a collection resource'),
- cfg.StrOpt('metadata_host',
- default='$my_ip',
- help='the ip for the metadata api server'),
- cfg.IntOpt('metadata_port',
- default=8775,
- help='the port for the metadata api port'),
- cfg.StrOpt('default_image',
- default='ami-11111',
- help='default image to use, testing only'),
- cfg.StrOpt('default_instance_type',
- default='m1.small',
- help='default instance type to use, testing only'),
- cfg.StrOpt('null_kernel',
- default='nokernel',
- help='kernel image that indicates not to use a kernel, but to '
- 'use a raw disk image instead'),
- cfg.StrOpt('vpn_image_id',
- default='0',
- help='image id used when starting up a cloudpipe vpn server'),
- cfg.StrOpt('vpn_key_suffix',
- default='-vpn',
- help='Suffix to add to project name for vpn key and secgroups'),
- cfg.StrOpt('sqlite_db',
- default='nova.sqlite',
- help='the filename to use with sqlite'),
- cfg.BoolOpt('sqlite_synchronous',
- default=True,
- help='If passed, use synchronous mode for sqlite'),
- cfg.IntOpt('sql_idle_timeout',
- default=3600,
- help='timeout before idle sql connections are reaped'),
- cfg.IntOpt('sql_max_retries',
- default=10,
- help='maximum db connection retries during startup. '
- '(setting -1 implies an infinite retry count)'),
- cfg.IntOpt('sql_retry_interval',
- default=10,
- help='interval between retries of opening a sql connection'),
- cfg.StrOpt('compute_manager',
- default='nova.compute.manager.ComputeManager',
- help='full class name for the Manager for compute'),
- cfg.StrOpt('console_manager',
- default='nova.console.manager.ConsoleProxyManager',
- help='full class name for the Manager for console proxy'),
- cfg.StrOpt('cert_manager',
- default='nova.cert.manager.CertManager',
- help='full class name for the Manager for cert'),
- cfg.StrOpt('instance_dns_manager',
- default='nova.network.dns_driver.DNSDriver',
- help='full class name for the DNS Manager for instance IPs'),
- cfg.StrOpt('instance_dns_domain',
- default='',
- help='full class name for the DNS Zone for instance IPs'),
- cfg.StrOpt('floating_ip_dns_manager',
- default='nova.network.dns_driver.DNSDriver',
- help='full class name for the DNS Manager for floating IPs'),
- cfg.StrOpt('network_manager',
- default='nova.network.manager.VlanManager',
- help='full class name for the Manager for network'),
- cfg.StrOpt('scheduler_manager',
- default='nova.scheduler.manager.SchedulerManager',
- help='full class name for the Manager for scheduler'),
- cfg.StrOpt('host',
- default=socket.getfqdn(),
- help='Name of this node. This can be an opaque identifier. '
- 'It is not necessarily a hostname, FQDN, or IP address. '
- 'However, the node name must be valid within '
- 'an AMQP key, and if using ZeroMQ, a valid '
- 'hostname, FQDN, or IP address'),
- cfg.StrOpt('node_availability_zone',
- default='nova',
- help='availability zone of this node'),
- cfg.ListOpt('memcached_servers',
- default=None,
- help='Memcached servers or None for in process cache.'),
- cfg.StrOpt('instance_usage_audit_period',
- default='month',
- help='time period to generate instance usages for. '
- 'Time period must be hour, day, month or year'),
- cfg.IntOpt('bandwidth_poll_interval',
- default=600,
- help='interval to pull bandwidth usage info'),
- cfg.BoolOpt('start_guests_on_host_boot',
- default=False,
- help='Whether to restart guests when the host reboots'),
- cfg.BoolOpt('resume_guests_state_on_host_boot',
- default=False,
- help='Whether to start guests that were running before the '
- 'host rebooted'),
- cfg.StrOpt('default_ephemeral_format',
- default=None,
- help='The default format an ephemeral_volume will be '
- 'formatted with on creation.'),
- cfg.StrOpt('rootwrap_config',
- default="/etc/nova/rootwrap.conf",
- help='Path to the rootwrap configuration file to use for '
- 'running commands as root'),
- cfg.StrOpt('network_driver',
- default='nova.network.linux_net',
- help='Driver to use for network creation'),
- cfg.BoolOpt('use_ipv6',
- default=False,
- help='use ipv6'),
- cfg.BoolOpt('enable_instance_password',
- default=True,
- help='Allows use of instance password during '
- 'server creation'),
- cfg.IntOpt('password_length',
- default=12,
- help='Length of generated instance admin passwords'),
- cfg.BoolOpt('monkey_patch',
- default=False,
- help='Whether to log monkey patching'),
- cfg.ListOpt('monkey_patch_modules',
- default=[
- 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
- 'nova.compute.api:nova.notifier.api.notify_decorator'
- ],
- help='List of modules/decorators to monkey patch'),
- cfg.BoolOpt('allow_resize_to_same_host',
- default=False,
- help='Allow destination machine to match source for resize. '
- 'Useful when testing in single-host environments.'),
- cfg.IntOpt('reclaim_instance_interval',
- default=0,
- help='Interval in seconds for reclaiming deleted instances'),
- cfg.IntOpt('zombie_instance_updated_at_window',
- default=172800,
- help='Number of seconds zombie instances are cleaned up.'),
- cfg.IntOpt('service_down_time',
- default=60,
- help='maximum time since last check-in for up service'),
- cfg.StrOpt('default_schedule_zone',
- default=None,
- help='availability zone to use when user doesn\'t specify one'),
- cfg.ListOpt('isolated_images',
- default=[],
- help='Images to run on isolated host'),
- cfg.ListOpt('isolated_hosts',
- default=[],
- help='Host reserved for specific images'),
- cfg.StrOpt('cache_images',
- default='all',
- help='Cache glance images locally. `all` will cache all'
- ' images, `some` will only cache images that have the'
- ' image_property `cache_in_nova=True`, and `none` turns'
- ' off caching entirely'),
- cfg.BoolOpt('use_cow_images',
- default=True,
- help='Whether to use cow images'),
- cfg.StrOpt('compute_api_class',
- default='nova.compute.api.API',
- help='The full class name of the compute API class to use'),
- cfg.StrOpt('network_api_class',
- default='nova.network.api.API',
- help='The full class name of the network API class to use'),
- cfg.StrOpt('volume_api_class',
- default='nova.volume.cinder.API',
- help='The full class name of the volume API class to use'),
- cfg.StrOpt('security_group_handler',
- default='nova.network.sg.NullSecurityGroupHandler',
- help='The full class name of the security group handler class'),
- cfg.StrOpt('default_access_ip_network_name',
- default=None,
- help='Name of network to use to set access ips for instances'),
- cfg.StrOpt('auth_strategy',
- default='noauth',
- help='The strategy to use for auth: noauth or keystone.'),
- cfg.ListOpt('non_inheritable_image_properties',
- default=['cache_in_nova',
- 'bittorrent'],
- help='These are image properties which a snapshot should not'
- ' inherit from an instance'),
- cfg.BoolOpt('defer_iptables_apply',
- default=False,
- help='Whether to batch up the application of IPTables rules'
- ' during a host restart and apply all at the end of the'
- ' init phase'),
- cfg.StrOpt('compute_driver',
- help='Driver to use for controlling virtualization. Options '
- 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
- 'fake.FakeDriver, baremetal.BareMetalDriver, '
- 'vmwareapi.VMWareESXDriver'),
-]
-
-CONF.register_opts(global_opts)
diff --git a/nova/hooks.py b/nova/hooks.py
new file mode 100644
index 000000000..8a9c77e73
--- /dev/null
+++ b/nova/hooks.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Decorator and config option definitions for adding custom code (hooks)
+around callables.
+
+Any method may have the 'add_hook' decorator applied, which yields the
+ability to invoke Hook objects before or after the method. (i.e. pre and
+post)
+
+Hook objects are loaded by HookLoaders. Each named hook may invoke multiple
+Hooks.
+
+Example Hook object:
+
+class MyHook(object):
+ def pre(self, *args, **kwargs):
+ # do stuff before wrapped callable runs
+
+ def post(self, rv, *args, **kwargs):
+ # do stuff after wrapped callable runs
+
+
+"""
+
+import functools
+
+import stevedore
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+NS = 'nova.hooks'
+
+_HOOKS = {} # hook name => hook manager
+
+
+class HookManager(stevedore.hook.HookManager):
+ def __init__(self, name):
+ # invoke_on_load creates an instance of the Hook class
+ super(HookManager, self).__init__(NS, name, invoke_on_load=True)
+
+ def run_pre(self, name, args, kwargs):
+ for e in self.extensions:
+ obj = e.obj
+ pre = getattr(obj, 'pre', None)
+ if pre:
+ LOG.debug(_("Running %(name)s pre-hook: %(obj)s") % locals())
+ pre(*args, **kwargs)
+
+ def run_post(self, name, rv, args, kwargs):
+ for e in reversed(self.extensions):
+ obj = e.obj
+ post = getattr(obj, 'post', None)
+ if post:
+ LOG.debug(_("Running %(name)s post-hook: %(obj)s") % locals())
+ post(rv, *args, **kwargs)
+
+
+def add_hook(name):
+ """Execute optional pre and post methods around the decorated
+ function. This is useful for customization around callables.
+ """
+
+ def outer(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ manager = _HOOKS.setdefault(name, HookManager(name))
+
+ manager.run_pre(name, args, kwargs)
+ rv = f(*args, **kwargs)
+ manager.run_post(name, rv, args, kwargs)
+
+ return rv
+
+ return inner
+ return outer
+
+
+def reset():
+ """Clear loaded hooks."""
+ _HOOKS.clear()
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 0cbc91531..75551d35c 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of an image service that uses Glance as the backend"""
+"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
@@ -29,16 +29,53 @@ import urlparse
import glanceclient
import glanceclient.exc
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+glance_opts = [
+ cfg.StrOpt('glance_host',
+ default='$my_ip',
+ help='default glance hostname or ip'),
+ cfg.IntOpt('glance_port',
+ default=9292,
+ help='default glance port'),
+ cfg.StrOpt('glance_protocol',
+ default='http',
+ help='Default protocol to use when connecting to glance. '
+ 'Set to https for SSL.'),
+ cfg.ListOpt('glance_api_servers',
+ default=['$glance_host:$glance_port'],
+ help='A list of the glance api servers available to nova. '
+ 'Prefix with https:// for ssl-based glance api servers. '
+ '([hostname|ip]:port)'),
+ cfg.BoolOpt('glance_api_insecure',
+ default=False,
+ help='Allow to perform insecure SSL (https) requests to '
+ 'glance'),
+ cfg.IntOpt('glance_num_retries',
+ default=0,
+ help='Number retries when downloading an image from glance'),
+]
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.register_opts(glance_opts)
+CONF.import_opt('auth_strategy', 'nova.api.auth')
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+def generate_glance_url():
+ """Generate the URL to glance."""
+ return "%s://%s:%d" % (CONF.glance_protocol, CONF.glance_host,
+ CONF.glance_port)
+
+
+def generate_image_url(image_ref):
+ """Generate an image URL from an image_ref."""
+ return "%s/images/%s" % (generate_glance_url(), image_ref)
def _parse_image_ref(image_href):
@@ -58,7 +95,7 @@ def _parse_image_ref(image_href):
def _create_glance_client(context, host, port, use_ssl, version=1):
- """Instantiate a new glanceclient.Client object"""
+ """Instantiate a new glanceclient.Client object."""
if use_ssl:
scheme = 'https'
else:
@@ -260,12 +297,15 @@ class GlanceImageService(object):
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
+ :raises: ImageNotAuthorized if the user is not authorized.
"""
try:
self._client.call(context, 1, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
+ except glanceclient.exc.HTTPForbidden:
+ raise exception.ImageNotAuthorized(image_id=image_id)
return True
@staticmethod
diff --git a/nova/image/s3.py b/nova/image/s3.py
index d252baba0..833fb44f9 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -31,9 +31,7 @@ from lxml import etree
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
-from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -46,6 +44,13 @@ s3_opts = [
cfg.StrOpt('image_decryption_dir',
default='/tmp',
help='parent dir for tempdir used for image decryption'),
+ cfg.StrOpt('s3_host',
+ default='$my_ip',
+ help='hostname or ip for openstack to use when accessing '
+ 'the s3 api'),
+ cfg.IntOpt('s3_port',
+ default=3333,
+ help='port used when accessing the s3 api'),
cfg.StrOpt('s3_access_key',
default='notchecked',
help='access key to use for s3 server for images'),
@@ -61,8 +66,9 @@ s3_opts = [
'when downloading from s3'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(s3_opts)
+CONF.import_opt('my_ip', 'nova.netconf')
class S3ImageService(object):
@@ -395,7 +401,7 @@ class S3ImageService(object):
@staticmethod
def _test_for_malicious_tarball(path, filename):
- """Raises exception if extracting tarball would escape extract path"""
+ """Raises exception if extracting tarball would escape extract path."""
tar_file = tarfile.open(filename, 'r|gz')
for n in tar_file.getnames():
if not os.path.abspath(os.path.join(path, n)).startswith(path):
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index 1b4c99fbb..d50a66949 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""IPv6 address generation with account identifier embedded"""
+"""IPv6 address generation with account identifier embedded."""
import hashlib
import netaddr
diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py
index 96e30e966..43a185101 100644
--- a/nova/ipv6/api.py
+++ b/nova/ipv6/api.py
@@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova import utils
@@ -24,7 +22,7 @@ ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
help='Backend to use for IPv6 generation')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(ipv6_backend_opt)
IMPL = None
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
index dec0935f5..147fe6876 100644
--- a/nova/ipv6/rfc2462.py
+++ b/nova/ipv6/rfc2462.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""RFC2462 style IPv6 address generation"""
+"""RFC2462 style IPv6 address generation."""
import netaddr
diff --git a/nova/loadables.py b/nova/loadables.py
new file mode 100644
index 000000000..964845184
--- /dev/null
+++ b/nova/loadables.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Generic Loadable class support.
+
+Meant to be used by such things as scheduler filters and weights where we
+want to load modules from certain directories and find certain types of
+classes within those modules. Note that this is quite different than
+generic plugins and the pluginmanager code that exists elsewhere.
+
+Usage:
+
+Create a directory with an __init__.py with code such as:
+
+class SomeLoadableClass(object):
+ pass
+
+
+class MyLoader(nova.loadables.BaseLoader)
+ def __init__(self):
+ super(MyLoader, self).__init__(SomeLoadableClass)
+
+If you create modules in the same directory and subclass SomeLoadableClass
+within them, MyLoader().get_all_classes() will return a list
+of such classes.
+"""
+
+import inspect
+import os
+import sys
+
+from nova import exception
+from nova.openstack.common import importutils
+
+
+class BaseLoader(object):
+ def __init__(self, loadable_cls_type):
+ mod = sys.modules[self.__class__.__module__]
+ self.path = os.path.abspath(mod.__path__[0])
+ self.package = mod.__package__
+ self.loadable_cls_type = loadable_cls_type
+
+ def _is_correct_class(self, obj):
+ """Return whether an object is a class of the correct type and
+ is not prefixed with an underscore.
+ """
+ return (inspect.isclass(obj) and
+ (not obj.__name__.startswith('_')) and
+ issubclass(obj, self.loadable_cls_type))
+
+ def _get_classes_from_module(self, module_name):
+ """Get the classes from a module that match the type we want."""
+ classes = []
+ module = importutils.import_module(module_name)
+ for obj_name in dir(module):
+ # Skip objects that are meant to be private.
+ if obj_name.startswith('_'):
+ continue
+ itm = getattr(module, obj_name)
+ if self._is_correct_class(itm):
+ classes.append(itm)
+ return classes
+
+ def get_all_classes(self):
+ """Get the classes of the type we want from all modules found
+ in the directory that defines this class.
+ """
+ classes = []
+ for dirpath, dirnames, filenames in os.walk(self.path):
+ relpath = os.path.relpath(dirpath, self.path)
+ if relpath == '.':
+ relpkg = ''
+ else:
+ relpkg = '.%s' % '.'.join(relpath.split(os.sep))
+ for fname in filenames:
+ root, ext = os.path.splitext(fname)
+ if ext != '.py' or root == '__init__':
+ continue
+ module_name = "%s%s.%s" % (self.package, relpkg, root)
+ mod_classes = self._get_classes_from_module(module_name)
+ classes.extend(mod_classes)
+ return classes
+
+ def get_matching_classes(self, loadable_class_names):
+ """Get loadable classes from a list of names. Each name can be
+ a full module path or the full path to a method that returns
+ classes to use. The latter behavior is useful to specify a method
+ that returns a list of classes to use in a default case.
+ """
+ classes = []
+ for cls_name in loadable_class_names:
+ obj = importutils.import_class(cls_name)
+ if self._is_correct_class(obj):
+ classes.append(obj)
+ elif inspect.isfunction(obj):
+ # Get list of classes from a function
+ for cls in obj():
+ classes.append(cls)
+ else:
+ error_str = 'Not a class of the correct type'
+ raise exception.ClassNotFound(class_name=cls_name,
+ exception=error_str)
+ return classes
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 5301ee2c5..347b98733 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -1,14 +1,14 @@
# Translations template for nova.
-# Copyright (C) 2012 ORGANIZATION
+# Copyright (C) 2013 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2012.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2013.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2012-11-07 00:02+0000\n"
+"POT-Creation-Date: 2013-01-06 00:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -27,50 +27,54 @@ msgstr ""
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
-#: nova/crypto.py:47
+#: nova/crypto.py:46
msgid "Filename of root CA"
msgstr ""
-#: nova/crypto.py:50
+#: nova/crypto.py:49
msgid "Filename of private key"
msgstr ""
-#: nova/crypto.py:53
+#: nova/crypto.py:52
msgid "Filename of root Certificate Revocation List"
msgstr ""
-#: nova/crypto.py:56
+#: nova/crypto.py:55
msgid "Where we keep our keys"
msgstr ""
-#: nova/crypto.py:59
+#: nova/crypto.py:58
msgid "Where we keep our root CA"
msgstr ""
-#: nova/crypto.py:62
+#: nova/crypto.py:61
msgid "Should we use a CA for each project?"
msgstr ""
-#: nova/crypto.py:66
+#: nova/crypto.py:65
#, python-format
msgid "Subject for certificate for users, %s for project, user, timestamp"
msgstr ""
-#: nova/crypto.py:71
+#: nova/crypto.py:70
#, python-format
msgid "Subject for certificate for projects, %s for project, timestamp"
msgstr ""
-#: nova/crypto.py:301
+#: nova/crypto.py:300
+msgid "Failed to write inbound.csr"
+msgstr ""
+
+#: nova/crypto.py:303
#, python-format
msgid "Flags path: %s"
msgstr ""
-#: nova/exception.py:56
+#: nova/exception.py:66
msgid "Unexpected error while running command."
msgstr ""
-#: nova/exception.py:59
+#: nova/exception.py:69
#, python-format
msgid ""
"%(description)s\n"
@@ -80,261 +84,240 @@ msgid ""
"Stderr: %(stderr)r"
msgstr ""
-#: nova/exception.py:72
-msgid "DB exception wrapped."
-msgstr ""
-
-#: nova/exception.py:131
+#: nova/exception.py:126
msgid "An unknown exception occurred."
msgstr ""
-#: nova/exception.py:152 nova/openstack/common/rpc/common.py:46
+#: nova/exception.py:147 nova/openstack/common/rpc/common.py:47
msgid "Exception in string format operation"
msgstr ""
-#: nova/exception.py:162
+#: nova/exception.py:161
msgid "Unknown"
msgstr ""
-#: nova/exception.py:179
+#: nova/exception.py:185
msgid "Failed to decrypt text"
msgstr ""
-#: nova/exception.py:183
+#: nova/exception.py:189
msgid "Virtual Interface creation failed"
msgstr ""
-#: nova/exception.py:187
+#: nova/exception.py:193
msgid "5 attempts to create virtual interfacewith unique mac address failed"
msgstr ""
-#: nova/exception.py:192
+#: nova/exception.py:198
#, python-format
msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
msgstr ""
-#: nova/exception.py:197
+#: nova/exception.py:203
msgid "Not authorized."
msgstr ""
-#: nova/exception.py:202
+#: nova/exception.py:208
msgid "User does not have admin privileges"
msgstr ""
-#: nova/exception.py:206
+#: nova/exception.py:212
#, python-format
msgid "Policy doesn't allow %(action)s to be performed."
msgstr ""
-#: nova/exception.py:210
+#: nova/exception.py:216
#, python-format
msgid "Image %(image_id)s is not active."
msgstr ""
-#: nova/exception.py:214
+#: nova/exception.py:220
#, python-format
msgid "Not authorized for image %(image_id)s."
msgstr ""
-#: nova/exception.py:218
+#: nova/exception.py:224
msgid "Unacceptable parameters."
msgstr ""
-#: nova/exception.py:223
-msgid "Invalid snapshot"
-msgstr ""
-
-#: nova/exception.py:227
+#: nova/exception.py:229
#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:231
-#, python-format
-msgid "Volume %(volume_id)s is still attached, detach volume first."
-msgstr ""
-
-#: nova/exception.py:235 nova/api/ec2/cloud.py:390 nova/api/ec2/cloud.py:415
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2238
+#: nova/exception.py:233 nova/api/ec2/cloud.py:447 nova/api/ec2/cloud.py:472
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2321
msgid "Keypair data is invalid"
msgstr ""
-#: nova/exception.py:239
-msgid "Failed to load data into json format"
-msgstr ""
-
-#: nova/exception.py:243
+#: nova/exception.py:237
msgid "The request is invalid."
msgstr ""
-#: nova/exception.py:247
+#: nova/exception.py:241
msgid "Invalid input received"
msgstr ""
-#: nova/exception.py:251
-msgid "Invalid volume type"
-msgstr ""
-
-#: nova/exception.py:255
+#: nova/exception.py:245
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1285
-#: nova/api/openstack/compute/contrib/admin_actions.py:239
+#: nova/exception.py:249 nova/api/openstack/compute/servers.py:1307
+#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
-#: nova/exception.py:263
+#: nova/exception.py:253
msgid "Invalid metadata size"
msgstr ""
-#: nova/exception.py:267
+#: nova/exception.py:257
#, python-format
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:271 nova/api/ec2/cloud.py:572
+#: nova/exception.py:261 nova/api/ec2/cloud.py:629
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
-#: nova/exception.py:275
+#: nova/exception.py:265
#, python-format
msgid "Invalid content type %(content_type)s."
msgstr ""
-#: nova/exception.py:279
+#: nova/exception.py:269
#, python-format
msgid "Invalid cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:283
+#: nova/exception.py:273
msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
-#: nova/exception.py:290
+#: nova/exception.py:280
#, python-format
msgid "%(err)s"
msgstr ""
-#: nova/exception.py:294
+#: nova/exception.py:284
#, python-format
msgid ""
"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:"
" %(reason)s."
msgstr ""
-#: nova/exception.py:299
+#: nova/exception.py:289
#, python-format
msgid "Group not valid. Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:303
+#: nova/exception.py:293
msgid "Sort key supplied was not valid."
msgstr ""
-#: nova/exception.py:307
+#: nova/exception.py:297
#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while"
" the instance is in this state."
msgstr ""
-#: nova/exception.py:312
+#: nova/exception.py:302
#, python-format
msgid "Instance %(instance_id)s is not running."
msgstr ""
-#: nova/exception.py:316
+#: nova/exception.py:306
#, python-format
msgid "Instance %(instance_id)s is not in rescue mode"
msgstr ""
-#: nova/exception.py:320
+#: nova/exception.py:310
#, python-format
msgid "Instance %(instance_id)s is not ready"
msgstr ""
-#: nova/exception.py:324
+#: nova/exception.py:314
msgid "Failed to suspend instance"
msgstr ""
-#: nova/exception.py:328
+#: nova/exception.py:318
msgid "Failed to resume server"
msgstr ""
-#: nova/exception.py:332
+#: nova/exception.py:322
msgid "Failed to reboot instance"
msgstr ""
-#: nova/exception.py:336
+#: nova/exception.py:326
msgid "Failed to terminate instance"
msgstr ""
-#: nova/exception.py:340
+#: nova/exception.py:330
msgid "Service is unavailable at this time."
msgstr ""
-#: nova/exception.py:344
+#: nova/exception.py:334
msgid "Insufficient compute resources."
msgstr ""
-#: nova/exception.py:348
+#: nova/exception.py:338
msgid "Compute service is unavailable at this time."
msgstr ""
-#: nova/exception.py:352
+#: nova/exception.py:342
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:357
+#: nova/exception.py:347
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:361
+#: nova/exception.py:351
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:366
+#: nova/exception.py:356
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:371
+#: nova/exception.py:361
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:375
+#: nova/exception.py:365
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:379
+#: nova/exception.py:369
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:383
+#: nova/exception.py:373
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:387
+#: nova/exception.py:377
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:391
+#: nova/exception.py:381
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:397
+#: nova/exception.py:387
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -342,132 +325,80 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:404
+#: nova/exception.py:394
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:408
+#: nova/exception.py:398
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:412
+#: nova/exception.py:402
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:416
+#: nova/exception.py:406
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:420
+#: nova/exception.py:410
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:424
+#: nova/exception.py:414
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:429
+#: nova/exception.py:419
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:434
-#, python-format
-msgid "Could not find driver for compute_driver %(name)s"
-msgstr ""
-
-#: nova/exception.py:438
+#: nova/exception.py:424
#, python-format
-msgid "Volume %(volume_id)s persistence file could not be found."
+msgid "No agent-build associated with id %(id)s."
msgstr ""
-#: nova/exception.py:442
+#: nova/exception.py:428
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:446
-#, python-format
-msgid "Unable to locate account %(account_name)s on Solidfire device"
-msgstr ""
-
-#: nova/exception.py:451
-#, python-format
-msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
-msgstr ""
-
-#: nova/exception.py:456
-#, python-format
-msgid "Volume type %(volume_type_id)s could not be found."
-msgstr ""
-
-#: nova/exception.py:460
-#, python-format
-msgid "Volume type with name %(volume_type_name)s could not be found."
-msgstr ""
-
-#: nova/exception.py:465
-#, python-format
-msgid ""
-"Volume Type %(volume_type_id)s has no extra specs with key "
-"%(extra_specs_key)s."
-msgstr ""
-
-#: nova/exception.py:470
+#: nova/exception.py:432
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:474
-#, python-format
-msgid "deleting volume %(volume_name)s that has snapshot"
-msgstr ""
-
-#: nova/exception.py:478
-#, python-format
-msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
-msgstr ""
-
-#: nova/exception.py:483
+#: nova/exception.py:436
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:487
-#, python-format
-msgid "Failed to create iscsi target for volume %(volume_id)s."
-msgstr ""
-
-#: nova/exception.py:491
-#, python-format
-msgid "Failed to remove iscsi target for volume %(volume_id)s."
-msgstr ""
-
-#: nova/exception.py:495
+#: nova/exception.py:440
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:499
+#: nova/exception.py:444
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:503
+#: nova/exception.py:448
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:507
+#: nova/exception.py:452
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:511
+#: nova/exception.py:456
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -475,738 +406,735 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:518
+#: nova/exception.py:463
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:522
+#: nova/exception.py:467
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:526
+#: nova/exception.py:471
+#, python-format
+msgid "Network %(network_id)s is duplicated."
+msgstr ""
+
+#: nova/exception.py:475
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:530
+#: nova/exception.py:479
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:534
+#: nova/exception.py:483
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:538
+#: nova/exception.py:487
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:542
+#: nova/exception.py:491
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:546
+#: nova/exception.py:495
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:550
+#: nova/exception.py:499
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:554
+#: nova/exception.py:503
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:558
+#: nova/exception.py:507
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:563
-#, python-format
-msgid "Host is not set to the network (%(network_id)s)."
-msgstr ""
-
-#: nova/exception.py:567
+#: nova/exception.py:512
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:516
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:575
+#: nova/exception.py:520
#, python-format
msgid "Port %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:579
+#: nova/exception.py:524
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:583
+#: nova/exception.py:528
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:587
+#: nova/exception.py:532
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:591
+#: nova/exception.py:536
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:596
+#: nova/exception.py:541
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:600
+#: nova/exception.py:545
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:605
+#: nova/exception.py:550
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:610
+#: nova/exception.py:555
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:615
+#: nova/exception.py:560
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:619
+#: nova/exception.py:564
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:623
+#: nova/exception.py:568
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:632
+#: nova/exception.py:577
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:636
+#: nova/exception.py:581
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:585
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:644
+#: nova/exception.py:589
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:648
+#: nova/exception.py:593
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:597
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:601
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:661
+#: nova/exception.py:606
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:666
+#: nova/exception.py:611
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:670
+#: nova/exception.py:615
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:674
+#: nova/exception.py:619
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:678
+#: nova/exception.py:623
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:682
+#: nova/exception.py:627
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:686
+#: nova/exception.py:631
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:690
+#: nova/exception.py:635
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:694
+#: nova/exception.py:639
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:698
+#: nova/exception.py:643
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:702
+#: nova/exception.py:647
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:706
+#: nova/exception.py:651
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:655
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:714
+#: nova/exception.py:659
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:719
+#: nova/exception.py:664
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:723
+#: nova/exception.py:668
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:727
+#: nova/exception.py:672
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:731
+#: nova/exception.py:676
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:735
+#: nova/exception.py:680
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:739
+#: nova/exception.py:684
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:743
+#: nova/exception.py:688
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:747
+#: nova/exception.py:692
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:751
+#: nova/exception.py:696
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:756
+#: nova/exception.py:701
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:760
+#: nova/exception.py:705
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:765
+#: nova/exception.py:710
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:770
+#: nova/exception.py:715
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:774
+#: nova/exception.py:719
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:779
+#: nova/exception.py:724
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:783
+#: nova/exception.py:728
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:789
+#: nova/exception.py:734
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:793
+#: nova/exception.py:738
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:797
+#: nova/exception.py:742
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:802
+#: nova/exception.py:747
#, python-format
-msgid "Invalid console type %(console_type)s "
+msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:751
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:755
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:815
+#: nova/exception.py:760
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:764
#, python-format
-msgid "Flavor access not found for %(flavor_id) / %(project_id) combination."
+msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:824
+#: nova/exception.py:769
+#, python-format
+msgid "Cell %(cell_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:773
+#, python-format
+msgid "Inconsistency in cell routing: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:777
+#, python-format
+msgid "Service API method not found: %(detail)s"
+msgstr ""
+
+#: nova/exception.py:781
+msgid "Timeout waiting for response from cell"
+msgstr ""
+
+#: nova/exception.py:785
+#, python-format
+msgid "Cell message has reached maximum hop count: %(hop_count)s"
+msgstr ""
+
+#: nova/exception.py:789
+msgid "No cells available matching scheduling criteria."
+msgstr ""
+
+#: nova/exception.py:793
+#, python-format
+msgid "Exception received during cell processing: %(exc_name)s."
+msgstr ""
+
+#: nova/exception.py:797
+#, python-format
+msgid "Cell is not known for instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/exception.py:801
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:805
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:833
+#: nova/exception.py:810
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:814
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:842
+#: nova/exception.py:819
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:847
+#: nova/exception.py:824
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:852
+#: nova/exception.py:829
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:856
+#: nova/exception.py:833
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:860
+#: nova/exception.py:837
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:865
+#: nova/exception.py:842
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:869
+#: nova/exception.py:846
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:873
+#: nova/exception.py:850
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:877
+#: nova/exception.py:854
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:858
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:885
+#: nova/exception.py:862
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:889
+#: nova/exception.py:866
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:893
+#: nova/exception.py:870
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:897
+#: nova/exception.py:874
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:878
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:906
-#, python-format
-msgid "Volume Type %(name)s already exists."
-msgstr ""
-
-#: nova/exception.py:910
+#: nova/exception.py:883
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:914
+#: nova/exception.py:887
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:918
+#: nova/exception.py:891
msgid "Migration error"
msgstr ""
-#: nova/exception.py:922
+#: nova/exception.py:895
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:928
+#: nova/exception.py:901
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:932
+#: nova/exception.py:905
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:936
+#: nova/exception.py:909
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:940
+#: nova/exception.py:913
+#, python-format
+msgid "Resize error: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:917
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:944
+#: nova/exception.py:921
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:948
+#: nova/exception.py:925
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:952
+#: nova/exception.py:929
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:933
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:960
+#: nova/exception.py:937
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:964
-#, python-format
-msgid "Host %(host)s is not up or doesn't exist."
-msgstr ""
-
-#: nova/exception.py:968
+#: nova/exception.py:941
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:975
+#: nova/exception.py:948
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:980
-msgid "Maximum volume size exceeded"
-msgstr ""
-
-#: nova/exception.py:984
-#, python-format
-msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
-msgstr ""
-
-#: nova/exception.py:988
+#: nova/exception.py:953
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:992
+#: nova/exception.py:957
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:996
+#: nova/exception.py:961
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1000
+#: nova/exception.py:965
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1004
+#: nova/exception.py:969
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1008
+#: nova/exception.py:973
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1012
+#: nova/exception.py:977
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1016
+#: nova/exception.py:981
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1021
+#: nova/exception.py:986
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1025
+#: nova/exception.py:990
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1029
+#: nova/exception.py:994
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1033
+#: nova/exception.py:998
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1038
+#: nova/exception.py:1003
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1042
-#, python-format
-msgid "Detected more than one volume with name %(vol_name)s"
-msgstr ""
-
-#: nova/exception.py:1046
-#, python-format
-msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
-msgstr ""
-
-#: nova/exception.py:1051
-#, python-format
-msgid "Bad or unexpected response from the storage volume backend API: %(data)s"
-msgstr ""
-
-#: nova/exception.py:1056
-msgid "Unknown NFS exception"
-msgstr ""
-
-#: nova/exception.py:1060
-msgid "No mounted NFS shares found"
-msgstr ""
-
-#: nova/exception.py:1064
-#, python-format
-msgid "There is no share which can host %(volume_size)sG"
-msgstr ""
-
-#: nova/exception.py:1068
+#: nova/exception.py:1007
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1072
+#: nova/exception.py:1011
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1078
-msgid "Bad response from SolidFire API"
-msgstr ""
-
-#: nova/exception.py:1082
-#, python-format
-msgid "Error in SolidFire API response: data=%(data)s"
-msgstr ""
-
-#: nova/exception.py:1086
+#: nova/exception.py:1017
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:1021
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1094
+#: nova/exception.py:1025
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1098
+#: nova/exception.py:1029
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1102
+#: nova/exception.py:1033
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1106
+#: nova/exception.py:1037
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1110
+#: nova/exception.py:1041
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1114
+#: nova/exception.py:1045
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1118
+#: nova/exception.py:1049
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1123
+#: nova/exception.py:1054
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1128
+#: nova/exception.py:1059
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1134
+#: nova/exception.py:1065
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1138
+#: nova/exception.py:1069
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1143
+#: nova/exception.py:1074
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1147
+#: nova/exception.py:1078
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
+#: nova/hooks.py:62
+#, python-format
+msgid "Running %(name)s pre-hook: %(obj)s"
+msgstr ""
+
+#: nova/hooks.py:70
+#, python-format
+msgid "Running %(name)s post-hook: %(obj)s"
+msgstr ""
+
#: nova/manager.py:166
#, python-format
msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
@@ -1222,246 +1150,243 @@ msgstr ""
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/manager.py:255
+#: nova/manager.py:257
msgid "Notifying Schedulers of capabilities ..."
msgstr ""
-#: nova/notifications.py:112 nova/notifications.py:152
+#: nova/notifications.py:111 nova/notifications.py:151
msgid "Failed to send state update notification"
msgstr ""
-#: nova/policy.py:32
+#: nova/notifications.py:229
+msgid "Failed to get nw_info"
+msgstr ""
+
+#: nova/policy.py:31
msgid "JSON file representing policy"
msgstr ""
-#: nova/policy.py:35
+#: nova/policy.py:34
msgid "Rule checked when requested rule is not found"
msgstr ""
-#: nova/quota.py:726
+#: nova/quota.py:719
#, python-format
msgid "Created reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:745
+#: nova/quota.py:738
#, python-format
msgid "Failed to commit reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:763
+#: nova/quota.py:756
#, python-format
msgid "Failed to roll back reservations %(reservations)s"
msgstr ""
-#: nova/service.py:170
-msgid "Full set of FLAGS:"
+#: nova/service.py:173
+msgid "Full set of CONF:"
msgstr ""
-#: nova/service.py:177
+#: nova/service.py:180
#, python-format
msgid "%(flag)s : FLAG SET "
msgstr ""
-#: nova/service.py:187 nova/service.py:285
+#: nova/service.py:190 nova/service.py:288
#, python-format
msgid "Caught %s, exiting"
msgstr ""
-#: nova/service.py:231
+#: nova/service.py:234
msgid "Parent process has died unexpectedly, exiting"
msgstr ""
-#: nova/service.py:267
+#: nova/service.py:270
msgid "Forking too fast, sleeping"
msgstr ""
-#: nova/service.py:290
+#: nova/service.py:293
msgid "Unhandled exception"
msgstr ""
-#: nova/service.py:297
+#: nova/service.py:300
#, python-format
msgid "Started child %d"
msgstr ""
-#: nova/service.py:307
+#: nova/service.py:310
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: nova/service.py:321
+#: nova/service.py:324
#, python-format
msgid "Child %(pid)d killed by signal %(sig)d"
msgstr ""
-#: nova/service.py:324
+#: nova/service.py:327
#, python-format
msgid "Child %(pid)d exited with status %(code)d"
msgstr ""
-#: nova/service.py:327
+#: nova/service.py:330
#, python-format
msgid "pid %d not in child list"
msgstr ""
-#: nova/service.py:347
+#: nova/service.py:350
#, python-format
msgid "Caught %s, stopping children"
msgstr ""
-#: nova/service.py:358
+#: nova/service.py:361
#, python-format
msgid "Waiting on %d children to exit"
msgstr ""
-#: nova/service.py:387
+#: nova/service.py:392
#, python-format
-msgid "Starting %(topic)s node (version %(vcs_string)s)"
+msgid "Starting %(topic)s node (version %(version)s)"
msgstr ""
-#: nova/service.py:403
+#: nova/service.py:409 nova/openstack/common/rpc/service.py:47
#, python-format
msgid "Creating Consumer connection for Service %s"
msgstr ""
-#: nova/service.py:495
-msgid "Service killed that has no database entry"
-msgstr ""
-
-#: nova/service.py:532
-msgid "The service database object disappeared, Recreating it."
-msgstr ""
-
-#: nova/service.py:547
-msgid "Recovered model server connection!"
+#: nova/service.py:429
+#, python-format
+msgid "Join ServiceGroup membership for this service %s"
msgstr ""
-#: nova/service.py:553
-msgid "model server went away"
+#: nova/service.py:506
+msgid "Service killed that has no database entry"
msgstr ""
-#: nova/service.py:644
+#: nova/service.py:626
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:170
+#: nova/utils.py:194
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:181
+#: nova/utils.py:205
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:197 nova/utils.py:275 nova/virt/powervm/common.py:82
+#: nova/utils.py:229 nova/utils.py:307 nova/virt/powervm/common.py:82
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:210
+#: nova/utils.py:242
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:250
+#: nova/utils.py:282
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:252
+#: nova/utils.py:284
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:256
+#: nova/utils.py:288
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:291
+#: nova/utils.py:323
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:450
+#: nova/utils.py:485
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:453
+#: nova/utils.py:488
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:488
+#: nova/utils.py:523
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:549
+#: nova/utils.py:584
msgid "in looping call"
msgstr ""
-#: nova/utils.py:609
+#: nova/utils.py:644
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:738
+#: nova/utils.py:773
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:767
+#: nova/utils.py:802
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:895
+#: nova/utils.py:926
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:973
+#: nova/utils.py:1003
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1091 nova/virt/configdrive.py:151
+#: nova/utils.py:1113 nova/virt/configdrive.py:177
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/wsgi.py:85
+#: nova/wsgi.py:87
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:109
+#: nova/wsgi.py:111
msgid "Stopping WSGI server."
msgstr ""
-#: nova/wsgi.py:127
+#: nova/wsgi.py:129
msgid "WSGI server has stopped."
msgstr ""
-#: nova/wsgi.py:196
+#: nova/wsgi.py:198
msgid "You must implement __call__"
msgstr ""
-#: nova/wsgi.py:382
+#: nova/wsgi.py:384
#, python-format
msgid "Loading app %(name)s from %(path)s"
msgstr ""
-#: nova/api/auth.py:109
+#: nova/api/auth.py:116
msgid "Invalid service catalog json."
msgstr ""
-#: nova/api/auth.py:132
+#: nova/api/auth.py:139
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
-#: nova/api/sizelimit.py:52
+#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:64
msgid "Request is too large."
msgstr ""
@@ -1470,388 +1395,426 @@ msgstr ""
msgid "%(key)s with value %(value)s failed validator %(name)s"
msgstr ""
-#: nova/api/ec2/__init__.py:82
+#: nova/api/ec2/__init__.py:81
#, python-format
msgid "%(code)s: %(message)s"
msgstr ""
-#: nova/api/ec2/__init__.py:105
+#: nova/api/ec2/__init__.py:104
#, python-format
msgid "FaultWrapper: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:180
+#: nova/api/ec2/__init__.py:179
msgid "Too many failed authentications."
msgstr ""
-#: nova/api/ec2/__init__.py:190
+#: nova/api/ec2/__init__.py:189
#, python-format
msgid ""
"Access key %(access_key)s has had %(failures)d failed authentications and"
" will be locked out for %(lock_mins)d minutes."
msgstr ""
-#: nova/api/ec2/__init__.py:207
+#: nova/api/ec2/__init__.py:206
msgid "Signature not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:211
+#: nova/api/ec2/__init__.py:210
msgid "Access key not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:246 nova/api/ec2/__init__.py:261
+#: nova/api/ec2/__init__.py:245 nova/api/ec2/__init__.py:260
msgid "Failure communicating with keystone"
msgstr ""
-#: nova/api/ec2/__init__.py:320
+#: nova/api/ec2/__init__.py:259
+#, python-format
+msgid "Keystone failure: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:319
msgid "Timestamp failed validation."
msgstr ""
-#: nova/api/ec2/__init__.py:340
+#: nova/api/ec2/__init__.py:339
#, python-format
msgid "action: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:342
+#: nova/api/ec2/__init__.py:341
#, python-format
msgid "arg: %(key)s\t\tval: %(value)s"
msgstr ""
-#: nova/api/ec2/__init__.py:417
+#: nova/api/ec2/__init__.py:416
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr ""
-#: nova/api/ec2/__init__.py:489
+#: nova/api/ec2/__init__.py:488
#, python-format
msgid "InstanceNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:495
+#: nova/api/ec2/__init__.py:494
#, python-format
msgid "VolumeNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:501
+#: nova/api/ec2/__init__.py:500
#, python-format
msgid "SnapshotNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:507
+#: nova/api/ec2/__init__.py:506
#, python-format
msgid "NotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:510
+#: nova/api/ec2/__init__.py:509
#, python-format
msgid "EC2APIError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:518
+#: nova/api/ec2/__init__.py:517
#, python-format
msgid "KeyPairExists raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:522
+#: nova/api/ec2/__init__.py:521
#, python-format
msgid "InvalidParameterValue raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:526
+#: nova/api/ec2/__init__.py:525
#, python-format
msgid "InvalidPortRange raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:530
+#: nova/api/ec2/__init__.py:529
#, python-format
msgid "NotAuthorized raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:534
+#: nova/api/ec2/__init__.py:533
#, python-format
msgid "InvalidRequest raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:538
+#: nova/api/ec2/__init__.py:537
#, python-format
msgid "QuotaError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:542
+#: nova/api/ec2/__init__.py:541
#, python-format
msgid "Invalid id: bogus (expecting \"i-...\"): %s"
msgstr ""
-#: nova/api/ec2/__init__.py:551
+#: nova/api/ec2/__init__.py:550
#, python-format
msgid "Unexpected error raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:552
+#: nova/api/ec2/__init__.py:551
#, python-format
msgid "Environment: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:81
+#: nova/api/ec2/__init__.py:553 nova/api/metadata/handler.py:138
+#: nova/api/metadata/handler.py:185
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
-#: nova/api/ec2/apirequest.py:63
+#: nova/api/ec2/apirequest.py:62
#, python-format
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:338
+#: nova/api/ec2/cloud.py:395
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:364
+#: nova/api/ec2/cloud.py:421
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:380
+#: nova/api/ec2/cloud.py:437
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:387 nova/api/ec2/cloud.py:412
+#: nova/api/ec2/cloud.py:444 nova/api/ec2/cloud.py:469
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:393 nova/api/ec2/cloud.py:418
+#: nova/api/ec2/cloud.py:450 nova/api/ec2/cloud.py:475
#: nova/api/openstack/compute/contrib/keypairs.py:101
#, python-format
msgid "Key pair '%s' already exists."
msgstr ""
-#: nova/api/ec2/cloud.py:402
+#: nova/api/ec2/cloud.py:459
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:425
+#: nova/api/ec2/cloud.py:482
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:559 nova/api/ec2/cloud.py:680
+#: nova/api/ec2/cloud.py:616 nova/api/ec2/cloud.py:737
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:564
+#: nova/api/ec2/cloud.py:621
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:602 nova/api/ec2/cloud.py:634
+#: nova/api/ec2/cloud.py:659 nova/api/ec2/cloud.py:691
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:625
+#: nova/api/ec2/cloud.py:682
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:691
+#: nova/api/ec2/cloud.py:748
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:767
+#: nova/api/ec2/cloud.py:824
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:771 nova/api/openstack/compute/contrib/volumes.py:241
+#: nova/api/ec2/cloud.py:828 nova/api/openstack/compute/contrib/volumes.py:241
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:799
+#: nova/api/ec2/cloud.py:856
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:812
+#: nova/api/ec2/cloud.py:869
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:820
+#: nova/api/ec2/cloud.py:877
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:833 nova/api/openstack/compute/contrib/volumes.py:420
+#: nova/api/ec2/cloud.py:890 nova/api/openstack/compute/contrib/volumes.py:428
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:839
+#: nova/api/ec2/cloud.py:896
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:865 nova/api/ec2/cloud.py:922
-#: nova/api/ec2/cloud.py:1459 nova/api/ec2/cloud.py:1474
+#: nova/api/ec2/cloud.py:922 nova/api/ec2/cloud.py:979
+#: nova/api/ec2/cloud.py:1528 nova/api/ec2/cloud.py:1543
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:988
+#: nova/api/ec2/cloud.py:1049
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1139
+#: nova/api/ec2/cloud.py:1208
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1143
+#: nova/api/ec2/cloud.py:1212
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1147
+#: nova/api/ec2/cloud.py:1216
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1152
+#: nova/api/ec2/cloud.py:1221
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1155
+#: nova/api/ec2/cloud.py:1224
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1163
+#: nova/api/ec2/cloud.py:1232
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1171
+#: nova/api/ec2/cloud.py:1240
#: nova/api/openstack/compute/contrib/floating_ips.py:257
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1180
+#: nova/api/ec2/cloud.py:1249
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1183
+#: nova/api/ec2/cloud.py:1252
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1186
+#: nova/api/ec2/cloud.py:1255
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1194
+#: nova/api/ec2/cloud.py:1263
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1199
+#: nova/api/ec2/cloud.py:1268
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1202
+#: nova/api/ec2/cloud.py:1271
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1229
+#: nova/api/ec2/cloud.py:1298
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1261
+#: nova/api/ec2/cloud.py:1330
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1271
+#: nova/api/ec2/cloud.py:1340
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1280
+#: nova/api/ec2/cloud.py:1349
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1289
+#: nova/api/ec2/cloud.py:1358
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1380
+#: nova/api/ec2/cloud.py:1449
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1396
+#: nova/api/ec2/cloud.py:1465
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1415
+#: nova/api/ec2/cloud.py:1484
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1477
+#: nova/api/ec2/cloud.py:1546
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1479
+#: nova/api/ec2/cloud.py:1548
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1481
+#: nova/api/ec2/cloud.py:1550
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1483
+#: nova/api/ec2/cloud.py:1552
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1496
+#: nova/api/ec2/cloud.py:1565
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1525
+#: nova/api/ec2/cloud.py:1594
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1555
+#: nova/api/ec2/cloud.py:1624
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1573
+#: nova/api/ec2/cloud.py:1642
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1606
+#: nova/api/ec2/cloud.py:1675
msgid "Invalid CIDR"
msgstr ""
-#: nova/api/ec2/ec2utils.py:187
+#: nova/api/ec2/ec2utils.py:188
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/ec2/ec2utils.py:207
+#: nova/api/ec2/ec2utils.py:208
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/metadata/handler.py:79 nova/api/metadata/handler.py:86
+#: nova/api/metadata/handler.py:110
+msgid ""
+"X-Instance-ID present in request headers. The "
+"'service_quantum_metadata_proxy' option must be enabled to process this "
+"header."
+msgstr ""
+
+#: nova/api/metadata/handler.py:136 nova/api/metadata/handler.py:143
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr ""
+#: nova/api/metadata/handler.py:155
+msgid "X-Instance-ID header is missing from request."
+msgstr ""
+
+#: nova/api/metadata/handler.py:157
+msgid "Multiple X-Instance-ID headers found within request."
+msgstr ""
+
+#: nova/api/metadata/handler.py:171
+#, python-format
+msgid ""
+"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
+" %(expected_signature)s for id: %(instance_id)s. Request From: "
+"%(remote_address)s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:176
+msgid "Invalid proxy request signature."
+msgstr ""
+
+#: nova/api/metadata/handler.py:183 nova/api/metadata/handler.py:190
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
#: nova/api/openstack/__init__.py:51
#, python-format
msgid "Caught error: %s"
@@ -1862,150 +1825,150 @@ msgstr ""
msgid "%(url)s returned with HTTP %(status)d"
msgstr ""
-#: nova/api/openstack/__init__.py:126
+#: nova/api/openstack/__init__.py:135
msgid "Must specify an ExtensionManager class"
msgstr ""
-#: nova/api/openstack/__init__.py:137
+#: nova/api/openstack/__init__.py:146
#, python-format
msgid "Extended resource: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:171
+#: nova/api/openstack/__init__.py:180
#, python-format
msgid ""
"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
"resource"
msgstr ""
-#: nova/api/openstack/__init__.py:176
+#: nova/api/openstack/__init__.py:185
#, python-format
msgid "Extension %(ext_name)s extending resource: %(collection)s"
msgstr ""
-#: nova/api/openstack/common.py:100
+#: nova/api/openstack/common.py:114
#, python-format
msgid ""
"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
"Bad upgrade or db corrupted?"
msgstr ""
-#: nova/api/openstack/common.py:139 nova/api/openstack/common.py:173
+#: nova/api/openstack/common.py:153 nova/api/openstack/common.py:187
msgid "limit param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:142 nova/api/openstack/common.py:177
+#: nova/api/openstack/common.py:156 nova/api/openstack/common.py:191
msgid "limit param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:167
+#: nova/api/openstack/common.py:181
msgid "offset param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:181
+#: nova/api/openstack/common.py:195
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:216 nova/api/openstack/compute/servers.py:538
+#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:536
#, python-format
msgid "marker [%s] not found"
msgstr ""
-#: nova/api/openstack/common.py:256
+#: nova/api/openstack/common.py:270
#, python-format
msgid "href %s does not contain version"
msgstr ""
-#: nova/api/openstack/common.py:271
+#: nova/api/openstack/common.py:285
msgid "Image metadata limit exceeded"
msgstr ""
-#: nova/api/openstack/common.py:279
+#: nova/api/openstack/common.py:293
msgid "Image metadata key cannot be blank"
msgstr ""
-#: nova/api/openstack/common.py:282
+#: nova/api/openstack/common.py:296
msgid "Image metadata key too long"
msgstr ""
-#: nova/api/openstack/common.py:285
+#: nova/api/openstack/common.py:299
msgid "Invalid image metadata"
msgstr ""
-#: nova/api/openstack/common.py:336
+#: nova/api/openstack/common.py:350
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr ""
-#: nova/api/openstack/common.py:339
+#: nova/api/openstack/common.py:353
#, python-format
msgid "Instance is in an invalid state for '%(action)s'"
msgstr ""
-#: nova/api/openstack/common.py:419
+#: nova/api/openstack/common.py:433
msgid "Rejecting snapshot request, snapshots currently disabled"
msgstr ""
-#: nova/api/openstack/common.py:421
+#: nova/api/openstack/common.py:435
msgid "Instance snapshots are not permitted at this time."
msgstr ""
-#: nova/api/openstack/extensions.py:196
+#: nova/api/openstack/extensions.py:195
#, python-format
msgid "Loaded extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:235
+#: nova/api/openstack/extensions.py:234
#, python-format
msgid "Ext name: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:236
+#: nova/api/openstack/extensions.py:235
#, python-format
msgid "Ext alias: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:237
+#: nova/api/openstack/extensions.py:236
#, python-format
msgid "Ext description: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:239
+#: nova/api/openstack/extensions.py:238
#, python-format
msgid "Ext namespace: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:240
+#: nova/api/openstack/extensions.py:239
#, python-format
msgid "Ext updated: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:242
+#: nova/api/openstack/extensions.py:241
#, python-format
msgid "Exception loading extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:256
+#: nova/api/openstack/extensions.py:255
#, python-format
msgid "Loading extension %s"
msgstr ""
-#: nova/api/openstack/extensions.py:265
+#: nova/api/openstack/extensions.py:264
#, python-format
msgid "Calling extension factory %s"
msgstr ""
-#: nova/api/openstack/extensions.py:277
+#: nova/api/openstack/extensions.py:276
#, python-format
msgid "Failed to load extension %(ext_factory)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:358
+#: nova/api/openstack/extensions.py:357
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:382
+#: nova/api/openstack/extensions.py:381
#, python-format
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
@@ -2015,7 +1978,7 @@ msgid "cannot understand JSON"
msgstr ""
#: nova/api/openstack/wsgi.py:223
-#: nova/api/openstack/compute/contrib/hosts.py:83
+#: nova/api/openstack/compute/contrib/hosts.py:78
msgid "cannot understand XML"
msgstr ""
@@ -2064,7 +2027,7 @@ msgstr ""
#: nova/api/openstack/compute/server_metadata.py:76
#: nova/api/openstack/compute/server_metadata.py:101
#: nova/api/openstack/compute/server_metadata.py:126
-#: nova/api/openstack/compute/contrib/admin_actions.py:212
+#: nova/api/openstack/compute/contrib/admin_actions.py:211
msgid "Malformed request body"
msgstr ""
@@ -2098,7 +2061,7 @@ msgstr ""
msgid "subclasses must implement construct()!"
msgstr ""
-#: nova/api/openstack/compute/extensions.py:31
+#: nova/api/openstack/compute/extensions.py:38
msgid "Initializing extension manager."
msgstr ""
@@ -2117,37 +2080,37 @@ msgstr ""
msgid "Invalid minDisk filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:37
-#: nova/api/openstack/compute/images.py:145
-#: nova/api/openstack/compute/images.py:160
+#: nova/api/openstack/compute/image_metadata.py:36
+#: nova/api/openstack/compute/images.py:144
+#: nova/api/openstack/compute/images.py:159
msgid "Image not found."
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:77
+#: nova/api/openstack/compute/image_metadata.py:76
msgid "Incorrect request body format"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:81
+#: nova/api/openstack/compute/image_metadata.py:80
#: nova/api/openstack/compute/server_metadata.py:80
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:79
msgid "Request body and URI mismatch"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:84
+#: nova/api/openstack/compute/image_metadata.py:83
#: nova/api/openstack/compute/server_metadata.py:84
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:82
msgid "Request body contains too many items"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:110
+#: nova/api/openstack/compute/image_metadata.py:109
msgid "Invalid metadata key"
msgstr ""
-#: nova/api/openstack/compute/ips.py:72
+#: nova/api/openstack/compute/ips.py:71
msgid "Instance does not exist"
msgstr ""
-#: nova/api/openstack/compute/ips.py:95
+#: nova/api/openstack/compute/ips.py:94
msgid "Instance is not a member of specified network"
msgstr ""
@@ -2158,7 +2121,7 @@ msgid ""
"%(unit_string)s."
msgstr ""
-#: nova/api/openstack/compute/limits.py:271
+#: nova/api/openstack/compute/limits.py:272
msgid "This request was rate-limited."
msgstr ""
@@ -2173,96 +2136,98 @@ msgstr ""
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:447
-#: nova/api/openstack/compute/servers.py:459
-#: nova/api/openstack/compute/servers.py:554
-#: nova/api/openstack/compute/servers.py:722
-#: nova/api/openstack/compute/servers.py:983
-#: nova/api/openstack/compute/servers.py:1086
-#: nova/api/openstack/compute/servers.py:1236
-msgid "Instance could not be found"
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:498
+#: nova/api/openstack/compute/servers.py:501
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:517
+#: nova/api/openstack/compute/servers.py:520
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:561
+#: nova/api/openstack/compute/servers.py:539
+msgid "Flavor could not be found"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:555
+#: nova/api/openstack/compute/servers.py:723
+#: nova/api/openstack/compute/servers.py:987
+#: nova/api/openstack/compute/servers.py:1090
+#: nova/api/openstack/compute/servers.py:1258
+msgid "Instance could not be found"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:562
msgid "Server name is not a string or unicode"
msgstr ""
-#: nova/api/openstack/compute/servers.py:565
+#: nova/api/openstack/compute/servers.py:566
msgid "Server name is an empty string"
msgstr ""
-#: nova/api/openstack/compute/servers.py:569
+#: nova/api/openstack/compute/servers.py:570
msgid "Server name must be less than 256 characters."
msgstr ""
-#: nova/api/openstack/compute/servers.py:586
+#: nova/api/openstack/compute/servers.py:587
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:589
+#: nova/api/openstack/compute/servers.py:590
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:593
+#: nova/api/openstack/compute/servers.py:594
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:624
+#: nova/api/openstack/compute/servers.py:625
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:627
+#: nova/api/openstack/compute/servers.py:628
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:637
+#: nova/api/openstack/compute/servers.py:638
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:647
+#: nova/api/openstack/compute/servers.py:648
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:660
+#: nova/api/openstack/compute/servers.py:661
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:666
+#: nova/api/openstack/compute/servers.py:667
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:669
+#: nova/api/openstack/compute/servers.py:670
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:695
+#: nova/api/openstack/compute/servers.py:696
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:702
+#: nova/api/openstack/compute/servers.py:703
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:709
+#: nova/api/openstack/compute/servers.py:710
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:738
+#: nova/api/openstack/compute/servers.py:739
msgid "Server name is not defined"
msgstr ""
@@ -2303,246 +2268,252 @@ msgstr ""
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1001
-#: nova/api/openstack/compute/servers.py:1021
+#: nova/api/openstack/compute/servers.py:979
+msgid "Personality cannot be updated."
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:1005
+#: nova/api/openstack/compute/servers.py:1025
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1007
+#: nova/api/openstack/compute/servers.py:1011
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1027
+#: nova/api/openstack/compute/servers.py:1031
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1040
+#: nova/api/openstack/compute/servers.py:1044
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1044
+#: nova/api/openstack/compute/servers.py:1048
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1057
+#: nova/api/openstack/compute/servers.py:1061
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1069
+#: nova/api/openstack/compute/servers.py:1073
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1072
+#: nova/api/openstack/compute/servers.py:1076
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1096
+#: nova/api/openstack/compute/servers.py:1100
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1105
+#: nova/api/openstack/compute/servers.py:1109
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1114
+#: nova/api/openstack/compute/servers.py:1136
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1127
+#: nova/api/openstack/compute/servers.py:1149
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1131
-#: nova/api/openstack/compute/servers.py:1333
+#: nova/api/openstack/compute/servers.py:1153
+#: nova/api/openstack/compute/servers.py:1355
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1142
+#: nova/api/openstack/compute/servers.py:1164
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1155
+#: nova/api/openstack/compute/servers.py:1177
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1158
+#: nova/api/openstack/compute/servers.py:1180
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1176
-#: nova/api/openstack/compute/contrib/aggregates.py:142
+#: nova/api/openstack/compute/servers.py:1198
+#: nova/api/openstack/compute/contrib/aggregates.py:143
+#: nova/api/openstack/compute/contrib/coverage_ext.py:246
#: nova/api/openstack/compute/contrib/keypairs.py:78
-#: nova/api/openstack/compute/contrib/networks.py:73
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1181
+#: nova/api/openstack/compute/servers.py:1203
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1243
+#: nova/api/openstack/compute/servers.py:1265
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1276
+#: nova/api/openstack/compute/servers.py:1298
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1360
+#: nova/api/openstack/compute/servers.py:1382
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:61
+#: nova/api/openstack/compute/contrib/admin_actions.py:60
#, python-format
msgid "Compute.api::pause %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:78
+#: nova/api/openstack/compute/contrib/admin_actions.py:77
#, python-format
msgid "Compute.api::unpause %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:95
+#: nova/api/openstack/compute/contrib/admin_actions.py:94
#, python-format
msgid "compute.api::suspend %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#: nova/api/openstack/compute/contrib/admin_actions.py:111
#, python-format
msgid "compute.api::resume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:128
+#: nova/api/openstack/compute/contrib/admin_actions.py:127
#, python-format
msgid "Error in migrate %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:142
+#: nova/api/openstack/compute/contrib/admin_actions.py:141
#, python-format
msgid "Compute.api::reset_network %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:155
-#: nova/api/openstack/compute/contrib/admin_actions.py:171
-#: nova/api/openstack/compute/contrib/admin_actions.py:187
-#: nova/api/openstack/compute/contrib/admin_actions.py:309
+#: nova/api/openstack/compute/contrib/admin_actions.py:154
+#: nova/api/openstack/compute/contrib/admin_actions.py:170
+#: nova/api/openstack/compute/contrib/admin_actions.py:186
+#: nova/api/openstack/compute/contrib/admin_actions.py:314
#: nova/api/openstack/compute/contrib/multinic.py:41
-#: nova/api/openstack/compute/contrib/rescue.py:45
+#: nova/api/openstack/compute/contrib/rescue.py:44
msgid "Server not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:158
+#: nova/api/openstack/compute/contrib/admin_actions.py:157
#, python-format
msgid "Compute.api::inject_network_info %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:174
+#: nova/api/openstack/compute/contrib/admin_actions.py:173
#, python-format
msgid "Compute.api::lock %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:190
+#: nova/api/openstack/compute/contrib/admin_actions.py:189
#, python-format
msgid "Compute.api::unlock %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:220
+#: nova/api/openstack/compute/contrib/admin_actions.py:219
#, python-format
msgid "createBackup entity requires %s attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:224
+#: nova/api/openstack/compute/contrib/admin_actions.py:223
msgid "Malformed createBackup entity"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:230
+#: nova/api/openstack/compute/contrib/admin_actions.py:229
msgid "createBackup attribute 'rotation' must be an integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:245
+#: nova/api/openstack/compute/contrib/admin_actions.py:232
+msgid "createBackup attribute 'rotation' must be greater than or equal to zero"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:248
#: nova/api/openstack/compute/contrib/console_output.py:47
#: nova/api/openstack/compute/contrib/server_diagnostics.py:47
#: nova/api/openstack/compute/contrib/server_start_stop.py:38
msgid "Instance not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:273
+#: nova/api/openstack/compute/contrib/admin_actions.py:278
msgid "host and block_migration must be specified."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:281
+#: nova/api/openstack/compute/contrib/admin_actions.py:286
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:299
+#: nova/api/openstack/compute/contrib/admin_actions.py:304
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:312
+#: nova/api/openstack/compute/contrib/admin_actions.py:317
#, python-format
msgid "Compute.api::resetState %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:76
-#, python-format
-msgid ""
-"Cannot create aggregate with name %(name)s and availability zone "
-"%(avail_zone)s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/aggregates.py:88
+#: nova/api/openstack/compute/contrib/aggregates.py:89
#, python-format
msgid "Cannot show aggregate: %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:114
+#: nova/api/openstack/compute/contrib/aggregates.py:115
#, python-format
msgid "Cannot update aggregate: %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:126
+#: nova/api/openstack/compute/contrib/aggregates.py:127
#, python-format
msgid "Cannot delete aggregate: %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:139
+#: nova/api/openstack/compute/contrib/aggregates.py:140
#, python-format
msgid "Aggregates does not have %s action"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:152
-#: nova/api/openstack/compute/contrib/aggregates.py:157
+#: nova/api/openstack/compute/contrib/aggregates.py:153
+#: nova/api/openstack/compute/contrib/aggregates.py:158
#, python-format
msgid "Cannot add host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:170
-#: nova/api/openstack/compute/contrib/aggregates.py:174
+#: nova/api/openstack/compute/contrib/aggregates.py:171
+#: nova/api/openstack/compute/contrib/aggregates.py:175
#, python-format
msgid "Cannot remove host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:194
+#: nova/api/openstack/compute/contrib/aggregates.py:195
#, python-format
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/certificates.py:74
+#: nova/api/openstack/compute/contrib/certificates.py:73
msgid "Only root certificate can be retrieved."
msgstr ""
-#: nova/api/openstack/compute/contrib/cloudpipe.py:149
+#: nova/api/openstack/compute/contrib/cloudpipe.py:151
msgid ""
"Unable to claim IP for VPN instances, ensure it isn't running, and try "
"again in a few minutes"
msgstr ""
+#: nova/api/openstack/compute/contrib/cloudpipe_update.py:44
+#, python-format
+msgid "Unknown action %s"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/console_output.py:52
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr ""
@@ -2555,11 +2526,56 @@ msgstr ""
msgid "Unable to get console"
msgstr ""
+#: nova/api/openstack/compute/contrib/coverage_ext.py:101
+#, python-format
+msgid "Can't connect to service: %s, no portspecified\n"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:104
+#, python-format
+msgid "No backdoor API command for service: %s\n"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:123
+msgid "Coverage begin"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:157
+msgid "Coverage not running"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:186
+msgid "Invalid path"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:190
+msgid "No path given for report file"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:197
+msgid "You can't use html reports without combining"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:244
+#, python-format
+msgid "Coverage doesn't have %s action"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/disk_config.py:43
#, python-format
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr ""
+#: nova/api/openstack/compute/contrib/fixed_ips.py:42
+#, python-format
+msgid "Fixed IP %s has been deleted"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/fixed_ips.py:74
+#, python-format
+msgid "Fixed IP %s not found"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/flavor_access.py:80
#: nova/api/openstack/compute/contrib/flavor_access.py:104
msgid "Flavor not found."
@@ -2598,7 +2614,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/floating_ips.py:234
#: nova/api/openstack/compute/contrib/floating_ips.py:290
-#: nova/api/openstack/compute/contrib/security_groups.py:415
+#: nova/api/openstack/compute/contrib/security_groups.py:414
msgid "Missing parameter dict"
msgstr ""
@@ -2632,6 +2648,15 @@ msgstr ""
msgid "Error. Unable to associate floating ip"
msgstr ""
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:147
+#, python-format
+msgid "/%s should be specified as single address(es) not in cidr format"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/fping.py:56
+msgid "fping utility is not found."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/hosts.py:122
#, python-format
msgid "Host '%s' could not be found."
@@ -2657,16 +2682,28 @@ msgstr ""
msgid "Putting host %(host)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:188
+#: nova/api/openstack/compute/contrib/hosts.py:182
+msgid "Virt driver does not implement host maintenance mode."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Setting host %(host)s to %(state)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:235
+#: nova/api/openstack/compute/contrib/hosts.py:195
+msgid "Virt driver does not implement host disabled status."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:207
+msgid "Virt driver does not implement host power management."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:239
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:243
+#: nova/api/openstack/compute/contrib/hosts.py:247
msgid "Host not found"
msgstr ""
@@ -2704,59 +2741,67 @@ msgstr ""
msgid "Unable to find address %r"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:70
-#, python-format
-msgid "Network does not have %s action"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/networks.py:78
+#: nova/api/openstack/compute/contrib/networks.py:72
#, python-format
msgid "Disassociating network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:82
-#: nova/api/openstack/compute/contrib/networks.py:99
-#: nova/api/openstack/compute/contrib/networks.py:109
+#: nova/api/openstack/compute/contrib/networks.py:77
+#: nova/api/openstack/compute/contrib/networks.py:87
+#: nova/api/openstack/compute/contrib/networks.py:97
+#: nova/api/openstack/compute/contrib/networks_associate.py:27
+#: nova/api/openstack/compute/contrib/networks_associate.py:38
+#: nova/api/openstack/compute/contrib/networks_associate.py:50
msgid "Network not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:95
+#: nova/api/openstack/compute/contrib/networks.py:83
#, python-format
msgid "Showing network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:105
+#: nova/api/openstack/compute/contrib/networks.py:93
#, python-format
msgid "Deleting network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:120
+#: nova/api/openstack/compute/contrib/networks.py:108
msgid "Missing network in body"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:124
+#: nova/api/openstack/compute/contrib/networks.py:112
msgid "Network label is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:128
+#: nova/api/openstack/compute/contrib/networks.py:116
msgid "Network cidr or cidr_v6 is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:130
+#: nova/api/openstack/compute/contrib/networks.py:118
#, python-format
msgid "Creating network with label %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:146
+#: nova/api/openstack/compute/contrib/networks.py:134
#, python-format
msgid "Associating network %(network)s with project %(project)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:154
+#: nova/api/openstack/compute/contrib/networks.py:142
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr ""
+#: nova/api/openstack/compute/contrib/networks_associate.py:23
+#, python-format
+msgid "Disassociating host with network with id %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks_associate.py:34
+#, python-format
+msgid "Disassociating project with network with id %s"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/quotas.py:63
msgid "Quota limit must be -1 or greater."
msgstr ""
@@ -2765,24 +2810,24 @@ msgstr ""
msgid "Malformed scheduler_hints attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:222
+#: nova/api/openstack/compute/contrib/security_groups.py:221
msgid "Security group id should be integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:331
+#: nova/api/openstack/compute/contrib/security_groups.py:330
msgid "Not enough parameters to build a valid rule."
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:337
+#: nova/api/openstack/compute/contrib/security_groups.py:336
#, python-format
msgid "This rule already exists in group %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:418
+#: nova/api/openstack/compute/contrib/security_groups.py:417
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:422
+#: nova/api/openstack/compute/contrib/security_groups.py:421
msgid "Security group name cannot be empty"
msgstr ""
@@ -2805,48 +2850,169 @@ msgid "Delete volume with id: %s"
msgstr ""
#: nova/api/openstack/compute/contrib/volumes.py:350
-#: nova/api/openstack/compute/contrib/volumes.py:430
+#: nova/api/openstack/compute/contrib/volumes.py:438
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:381
+#: nova/api/openstack/compute/contrib/volumes.py:371
+#, python-format
+msgid "Bad volumeId format: volumeId is not in proper format (%s)"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/volumes.py:389
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:544
+#: nova/api/openstack/compute/contrib/volumes.py:552
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:587
+#: nova/api/openstack/compute/contrib/volumes.py:595
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:591
+#: nova/api/openstack/compute/contrib/volumes.py:599
#, python-format
msgid "Invalid value '%s' for force. "
msgstr ""
-#: nova/api/openstack/compute/views/servers.py:182
+#: nova/api/openstack/compute/views/servers.py:185
msgid "Instance has had its instance_type removed from the DB"
msgstr ""
-#: nova/cloudpipe/pipelib.py:45
+#: nova/cells/messaging.py:198
+#, python-format
+msgid "Error processing message locally: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:352 nova/cells/messaging.py:358
+#, python-format
+msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
+msgstr ""
+
+#: nova/cells/messaging.py:368
+#, python-format
+msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
+msgstr ""
+
+#: nova/cells/messaging.py:392
+#, python-format
+msgid "Error locating next hop for message: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:419
+#, python-format
+msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:498
+#, python-format
+msgid "Error locating next hops for message: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:518
+#, python-format
+msgid "Error sending message to next hops: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:536
+#, python-format
+msgid "Error waiting for responses from neighbor cells: %(exc)s"
+msgstr ""
+
+#: nova/cells/messaging.py:628
+#, python-format
+msgid "Unknown method '%(method)s' in compute API"
+msgstr ""
+
+#: nova/cells/messaging.py:651
+#, python-format
+msgid "Received capabilities from child cell %(cell_name)s: %(capabilities)s"
+msgstr ""
+
+#: nova/cells/messaging.py:660
+#, python-format
+msgid "Received capacities from child cell %(cell_name)s: %(capacities)s"
+msgstr ""
+
+#: nova/cells/messaging.py:719
+#, python-format
+msgid "Got update for instance %(instance_uuid)s: %(instance)s"
+msgstr ""
+
+#: nova/cells/messaging.py:742
+#, python-format
+msgid "Got update to delete instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/cells/messaging.py:757
+#, python-format
+msgid "Got broadcast to %(delete_type)s delete instance"
+msgstr ""
+
+#: nova/cells/messaging.py:771
+#, python-format
+msgid "Got message to create instance fault: %(instance_fault)s"
+msgstr ""
+
+#: nova/cells/messaging.py:921
+#, python-format
+msgid "Updating parents with our capabilities: %(capabs)s"
+msgstr ""
+
+#: nova/cells/messaging.py:941
+#, python-format
+msgid "Updating parents with our capacities: %(capacities)s"
+msgstr ""
+
+#: nova/cells/scheduler.py:94
+#, python-format
+msgid "Scheduling with routing_path=%(routing_path)s"
+msgstr ""
+
+#: nova/cells/scheduler.py:117
+#, python-format
+msgid ""
+"No cells available when scheduling. Will retry in %(sleep_time)s "
+"second(s)"
+msgstr ""
+
+#: nova/cells/scheduler.py:124
+#, python-format
+msgid "Error scheduling instances %(instance_uuids)s"
+msgstr ""
+
+#: nova/cells/state.py:264
+msgid "Updating cell cache from db."
+msgstr ""
+
+#: nova/cells/state.py:300
+#, python-format
+msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
+msgstr ""
+
+#: nova/cells/state.py:315
+#, python-format
+msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:43
msgid "Instance type for vpn instances"
msgstr ""
-#: nova/cloudpipe/pipelib.py:48
+#: nova/cloudpipe/pipelib.py:46
msgid "Template for cloudpipe instance boot script"
msgstr ""
-#: nova/cloudpipe/pipelib.py:51
+#: nova/cloudpipe/pipelib.py:49
msgid "Network to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:54
+#: nova/cloudpipe/pipelib.py:52
msgid "Netmask to push into openvpn config"
msgstr ""
@@ -2863,879 +3029,888 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/compute/api.py:224
+#: nova/compute/api.py:257
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:231
+#: nova/compute/api.py:264
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:240
+#: nova/compute/api.py:273
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:260
+#: nova/compute/api.py:293
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:270
+#: nova/compute/api.py:303
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:274
+#: nova/compute/api.py:307
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:278
+#: nova/compute/api.py:311
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:502
+#: nova/compute/api.py:543
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:574
+#: nova/compute/api.py:615
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:601
+#: nova/compute/api.py:642
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:833
-msgid "Going to try to soft delete instance"
+#: nova/compute/api.py:878
+msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:850
-msgid "No host for instance, deleting immediately"
+#: nova/compute/api.py:973
+msgid "host for instance is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:950
-msgid "host for instance is down, deleting from database"
+#: nova/compute/api.py:1017
+msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:994
+#: nova/compute/api.py:1039
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1034
+#: nova/compute/api.py:1086
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1048
+#: nova/compute/api.py:1100
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1112
+#: nova/compute/api.py:1167
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1247
+#: nova/compute/api.py:1299
#, python-format
msgid "Image type not recognized %s"
msgstr ""
-#: nova/compute/api.py:1356
+#: nova/compute/api.py:1408
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1678
+#: nova/compute/api.py:1741
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1687
+#: nova/compute/api.py:1750
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1729
+#: nova/compute/api.py:1792
#, python-format
-msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
+msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:1901
+#: nova/compute/api.py:1976
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1909
+#: nova/compute/api.py:1984
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:1977
+#: nova/compute/api.py:2052
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2062
+#: nova/compute/api.py:2137
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2211
+#: nova/compute/api.py:2294
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2215
+#: nova/compute/api.py:2298
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2316
+#: nova/compute/api.py:2399
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2319
+#: nova/compute/api.py:2402
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2327
+#: nova/compute/api.py:2410
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2333
+#: nova/compute/api.py:2416
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2353
+#: nova/compute/api.py:2436
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2356
+#: nova/compute/api.py:2439
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2363
+#: nova/compute/api.py:2446
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2428
+#: nova/compute/api.py:2511
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2436
+#: nova/compute/api.py:2519
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2439
+#: nova/compute/api.py:2522
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2696
+#: nova/compute/api.py:2779
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2705
+#: nova/compute/api.py:2788
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2708
+#: nova/compute/api.py:2791
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2719
+#: nova/compute/api.py:2802
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
-#: nova/compute/instance_types.py:63
-msgid "names can only contain [a-zA-Z0-9_.- ]"
+#: nova/compute/claims.py:94 nova/compute/claims.py:218
+#, python-format
+msgid "Aborting claim: %s"
+msgstr ""
+
+#: nova/compute/claims.py:116
+#, python-format
+msgid ""
+"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
+"%(vcpus)d"
+msgstr ""
+
+#: nova/compute/claims.py:128
+msgid "Claim successful"
+msgstr ""
+
+#: nova/compute/claims.py:130
+msgid "Claim failed"
+msgstr ""
+
+#: nova/compute/claims.py:135
+msgid "Memory"
msgstr ""
-#: nova/compute/instance_types.py:72 nova/compute/instance_types.py:80
-msgid "create arguments must be positive integers"
+#: nova/compute/claims.py:144
+msgid "Disk"
msgstr ""
-#: nova/compute/instance_types.py:94
+#: nova/compute/claims.py:153
+msgid "CPU"
+msgstr ""
+
+#: nova/compute/claims.py:165
#, python-format
-msgid "DB error: %s"
+msgid "Total %(type_)s: %(total)d %(unit)s, used: %(used)d %(unit)s"
msgstr ""
-#: nova/compute/instance_types.py:104
+#: nova/compute/claims.py:170
#, python-format
-msgid "Instance type %s not found for deletion"
+msgid "%(type_)s limit not specified, defaulting to unlimited"
msgstr ""
-#: nova/compute/manager.py:163
-msgid "Possibly task preempted."
+#: nova/compute/claims.py:177
+#, python-format
+msgid "%(type_)s limit: %(limit)d %(unit)s, free: %(free)d %(unit)s"
msgstr ""
-#: nova/compute/manager.py:243
-msgid "Compute driver option required, but not specified"
+#: nova/compute/claims.py:184
+#, python-format
+msgid ""
+"Unable to claim resources. Free %(type_)s %(free)d %(unit)s < requested "
+"%(requested)d %(unit)s"
msgstr ""
-#: nova/compute/manager.py:248
+#: nova/compute/instance_types.py:65
+msgid "names can only contain [a-zA-Z0-9_.- ]"
+msgstr ""
+
+#: nova/compute/instance_types.py:74
#, python-format
-msgid "Loading compute driver '%s'"
+msgid "'%s' argument must be a positive integer"
msgstr ""
-#: nova/compute/manager.py:255
+#: nova/compute/instance_types.py:82
+msgid "'rxtx_factor' argument must be a positive float"
+msgstr ""
+
+#: nova/compute/instance_types.py:90
#, python-format
-msgid "Unable to load the virtualization driver: %s"
+msgid "'%s' argument must be greater than 0"
+msgstr ""
+
+#: nova/compute/instance_types.py:100
+msgid "is_public must be a boolean"
+msgstr ""
+
+#: nova/compute/instance_types.py:107
+#, python-format
+msgid "DB error: %s"
msgstr ""
-#: nova/compute/manager.py:290
+#: nova/compute/instance_types.py:117
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:199
+msgid "Possibly task preempted."
+msgstr ""
+
+#: nova/compute/manager.py:329
+#, python-format
+msgid "%(nodename)s is not a valid node managed by this compute host."
+msgstr ""
+
+#: nova/compute/manager.py:358
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:318
+#: nova/compute/manager.py:377
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:332
+#: nova/compute/manager.py:389
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:346
+#: nova/compute/manager.py:403
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:356
+#: nova/compute/manager.py:413
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:375
+#: nova/compute/manager.py:447
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:448
+#: nova/compute/manager.py:523
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:541
+#: nova/compute/manager.py:595 nova/compute/manager.py:1834
+#, python-format
+msgid "No node specified, defaulting to %(node)s"
+msgstr ""
+
+#: nova/compute/manager.py:628
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:564
+#: nova/compute/manager.py:651
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:597 nova/compute/manager.py:1743
+#: nova/compute/manager.py:684 nova/compute/manager.py:1885
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:614
+#: nova/compute/manager.py:702
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:619
+#: nova/compute/manager.py:707
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:625
+#: nova/compute/manager.py:713
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:648
+#: nova/compute/manager.py:741
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:679
+#: nova/compute/manager.py:771
msgid "Instance has already been created"
msgstr ""
-#: nova/compute/manager.py:722
+#: nova/compute/manager.py:817
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:728
+#: nova/compute/manager.py:823
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:738
+#: nova/compute/manager.py:833
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:759
+#: nova/compute/manager.py:854
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:763
+#: nova/compute/manager.py:858
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:776
+#: nova/compute/manager.py:871
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:794
+#: nova/compute/manager.py:889
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:818
+#: nova/compute/manager.py:913
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:890
+#: nova/compute/manager.py:985
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:921
+#: nova/compute/manager.py:1016
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:924
+#: nova/compute/manager.py:1019
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:931
+#: nova/compute/manager.py:1026
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:955
+#: nova/compute/manager.py:1051
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:995 nova/compute/manager.py:1912
-#: nova/compute/manager.py:3112
+#: nova/compute/manager.py:1090 nova/compute/manager.py:2053
+#: nova/compute/manager.py:3388
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1128
+#: nova/compute/manager.py:1224
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1207
+#: nova/compute/manager.py:1311
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1231
+#: nova/compute/manager.py:1335
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1240
+#: nova/compute/manager.py:1344
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1277
+#: nova/compute/manager.py:1381
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1283
+#: nova/compute/manager.py:1387
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1336
+#: nova/compute/manager.py:1440
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1343
+#: nova/compute/manager.py:1447
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1348
+#: nova/compute/manager.py:1452
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1379
+#: nova/compute/manager.py:1483
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1386
+#: nova/compute/manager.py:1490
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1396
+#: nova/compute/manager.py:1500
msgid "set_admin_password is not implemented by this driver."
msgstr ""
-#: nova/compute/manager.py:1412
+#: nova/compute/manager.py:1516
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1420
+#: nova/compute/manager.py:1524
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1435
+#: nova/compute/manager.py:1539
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1439
+#: nova/compute/manager.py:1543
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1452
+#: nova/compute/manager.py:1564
+msgid ""
+"Unable to find a different image to use for rescue VM, using instance's "
+"current image"
+msgstr ""
+
+#: nova/compute/manager.py:1577
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1479
+#: nova/compute/manager.py:1611
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1500
+#: nova/compute/manager.py:1632
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1680
+#: nova/compute/manager.py:1801
+msgid "Instance has no source host"
+msgstr ""
+
+#: nova/compute/manager.py:1807
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1699
+#: nova/compute/manager.py:1816
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:1909
+#: nova/compute/manager.py:2050
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:1965
+#: nova/compute/manager.py:2106
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:1982
+#: nova/compute/manager.py:2123
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2020
+#: nova/compute/manager.py:2161
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2050
+#: nova/compute/manager.py:2191
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2066
+#: nova/compute/manager.py:2213
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2071
+#: nova/compute/manager.py:2218
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2074
+#: nova/compute/manager.py:2221
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2091
+#: nova/compute/manager.py:2238
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2116
+#: nova/compute/manager.py:2263
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2144
+#: nova/compute/manager.py:2291
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2188
+#: nova/compute/manager.py:2336
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2197
+#: nova/compute/manager.py:2345
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2212
+#: nova/compute/manager.py:2360
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2241
+#: nova/compute/manager.py:2390
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2245
+#: nova/compute/manager.py:2394
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2258
+#: nova/compute/manager.py:2407
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2302
+#: nova/compute/manager.py:2431
+msgid "Updating volume usage cache with totals"
+msgstr ""
+
+#: nova/compute/manager.py:2468
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2362
+#: nova/compute/manager.py:2541
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2422
+#: nova/compute/manager.py:2602
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2448
+#: nova/compute/manager.py:2630
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2481
-msgid "No floating_ip found"
-msgstr ""
-
-#: nova/compute/manager.py:2489
-msgid "No floating_ip found."
-msgstr ""
-
-#: nova/compute/manager.py:2491
-#, python-format
-msgid ""
-"Live migration: Unexpected error: cannot inherit floating ip.\n"
-"%(e)s"
-msgstr ""
-
-#: nova/compute/manager.py:2517
+#: nova/compute/manager.py:2683
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2519
+#: nova/compute/manager.py:2685
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2533
+#: nova/compute/manager.py:2699
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2664
+#: nova/compute/manager.py:2842
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2693
+#: nova/compute/manager.py:2887
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2698
+#: nova/compute/manager.py:2893
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2707
+#: nova/compute/manager.py:2902
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2714
+#: nova/compute/manager.py:2909
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2718
+#: nova/compute/manager.py:2913
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2725
+#: nova/compute/manager.py:2920
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2733
+#: nova/compute/manager.py:2928
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2750
+#: nova/compute/manager.py:2943
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2768
+#: nova/compute/manager.py:2961
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2791
+#: nova/compute/manager.py:2984
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:2856
+#: nova/compute/manager.py:3102
+msgid "Updating volume usage cache"
+msgstr ""
+
+#: nova/compute/manager.py:3120
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:2882
+#: nova/compute/manager.py:3149
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2888 nova/compute/manager.py:2926
+#: nova/compute/manager.py:3155 nova/compute/manager.py:3193
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:2913
+#: nova/compute/manager.py:3180
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:2950
+#: nova/compute/manager.py:3216
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2962 nova/compute/manager.py:2973
-#: nova/compute/manager.py:2987
+#: nova/compute/manager.py:3228 nova/compute/manager.py:3239
+#: nova/compute/manager.py:3253
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:2967
+#: nova/compute/manager.py:3233
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2980
+#: nova/compute/manager.py:3246
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2996
+#: nova/compute/manager.py:3262
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3004
-msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
+#: nova/compute/manager.py:3270
+msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3017
+#: nova/compute/manager.py:3285
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3066
+#: nova/compute/manager.py:3341
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3073
+#: nova/compute/manager.py:3348
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3080
+#: nova/compute/manager.py:3355
#, python-format
-msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:151
-msgid ""
-"Host field should be not be set on the instance until resources have been"
-" claimed."
-msgstr ""
-
-#: nova/compute/resource_tracker.py:168
-#, python-format
-msgid ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
+msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/resource_tracker.py:210
-#, python-format
+#: nova/compute/resource_tracker.py:92
msgid ""
-"Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
-" MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:219
-msgid "Memory limit not specified, defaulting to unlimited"
+"Host field should not be set on the instance until resources have been "
+"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:225
-#, python-format
-msgid "Memory limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:232
-#, python-format
+#: nova/compute/resource_tracker.py:97
msgid ""
-"Unable to claim resources. Free memory %(free_ram_mb)d MB < requested "
-"memory %(memory_mb)d MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:241
-#, python-format
-msgid ""
-"Total disk: %(total_disk)d GB, used: %(used_disk)d GB, free: "
-"%(free_disk)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:250
-msgid "Disk limit not specified, defaulting to unlimited"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:256
-#, python-format
-msgid "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:262
-#, python-format
-msgid ""
-"Unable to claim resources. Free disk %(free_disk_gb)d GB < requested "
-"disk %(disk_gb)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:273
-#, python-format
-msgid "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:280
-msgid "VCPU limit not specified, defaulting to unlimited"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:284
-#, python-format
-msgid "CPU limit: %(vcpu_limit)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:291
-#, python-format
-msgid ""
-"Unable to claim resources. Free CPU %(free_vcpus)d < requested CPU "
-"%(vcpus)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:310
-#, python-format
-msgid "Finishing claim: %s"
+"Node field should be not be set on the instance until resources have been"
+" claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:325
-#, python-format
-msgid "Aborting claim: %s"
+#: nova/compute/resource_tracker.py:244
+msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:363
+#: nova/compute/resource_tracker.py:248
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:401
+#: nova/compute/resource_tracker.py:299
#, python-format
msgid "Compute_service record created for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:406
+#: nova/compute/resource_tracker.py:304
#, python-format
msgid "Compute_service record updated for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:425
+#: nova/compute/resource_tracker.py:317
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/resource_tracker.py:435
+#: nova/compute/resource_tracker.py:327
#, python-format
msgid "Hypervisor: free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:436
+#: nova/compute/resource_tracker.py:328
#, python-format
msgid "Hypervisor: free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:441
+#: nova/compute/resource_tracker.py:333
#, python-format
msgid "Hypervisor: free VCPUs: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:443
+#: nova/compute/resource_tracker.py:335
msgid "Hypervisor: VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:450
+#: nova/compute/resource_tracker.py:342
#, python-format
msgid "Free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:451
+#: nova/compute/resource_tracker.py:343
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:456
+#: nova/compute/resource_tracker.py:348
#, python-format
msgid "Free VCPUS: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:458
+#: nova/compute/resource_tracker.py:350
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:533
+#: nova/compute/resource_tracker.py:388
+#, python-format
+msgid "Updating from migration %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:446
+msgid "Instance not resizing, skipping migration."
+msgstr ""
+
+#: nova/compute/resource_tracker.py:538
+#, python-format
+msgid ""
+"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
+"memory"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:551
#, python-format
msgid "Missing keys: %s"
msgstr ""
@@ -3749,55 +3924,72 @@ msgstr ""
msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:87
+#: nova/compute/utils.py:101
#, python-format
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/console/manager.py:81 nova/console/vmrc_manager.py:63
+#: nova/conductor/manager.py:62
+#, python-format
+msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
+msgstr ""
+
+#: nova/conductor/manager.py:198
+msgid "Invalid block_device_mapping_destroy invocation"
+msgstr ""
+
+#: nova/console/manager.py:80 nova/console/vmrc_manager.py:62
msgid "Adding console"
msgstr ""
-#: nova/console/manager.py:102 nova/console/vmrc_manager.py:115
+#: nova/console/manager.py:101 nova/console/vmrc_manager.py:114
#, python-format
msgid "Tried to remove non-existent console %(console_id)s."
msgstr ""
-#: nova/console/vmrc_manager.py:118
+#: nova/console/vmrc_manager.py:117
#, python-format
msgid "Removing console %(console_id)s."
msgstr ""
-#: nova/console/xvp.py:98
+#: nova/console/xvp.py:99
msgid "Rebuilding xvp conf"
msgstr ""
-#: nova/console/xvp.py:116
+#: nova/console/xvp.py:118
#, python-format
msgid "Re-wrote %s"
msgstr ""
-#: nova/console/xvp.py:121
+#: nova/console/xvp.py:122
+msgid "Failed to write configuration file"
+msgstr ""
+
+#: nova/console/xvp.py:126
msgid "Stopping xvp"
msgstr ""
-#: nova/console/xvp.py:134
+#: nova/console/xvp.py:139
msgid "Starting xvp"
msgstr ""
-#: nova/console/xvp.py:141
+#: nova/console/xvp.py:146
#, python-format
msgid "Error starting xvp: %s"
msgstr ""
-#: nova/console/xvp.py:144
+#: nova/console/xvp.py:149
msgid "Restarting xvp"
msgstr ""
-#: nova/console/xvp.py:146
+#: nova/console/xvp.py:151
msgid "xvp not running..."
msgstr ""
+#: nova/console/xvp.py:202
+msgid "Failed to run xvp."
+msgstr ""
+
#: nova/consoleauth/manager.py:70
#, python-format
msgid "Received Token: %(token)s, %(token_dict)s)"
@@ -3808,179 +4000,130 @@ msgstr ""
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/sqlalchemy/api.py:208
-#, python-format
-msgid "Unrecognized read_deleted value '%s'"
+#: nova/db/api.py:580
+msgid "Failed to notify cells of instance destroy"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2792
-#, python-format
-msgid "Change will make usage less than 0 for the following resources: %(unders)s"
+#: nova/db/api.py:689 nova/db/api.py:710
+msgid "Failed to notify cells of instance update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4693
-msgid "Backend exists"
+#: nova/db/api.py:749
+msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4713 nova/db/sqlalchemy/api.py:4739
-#, python-format
-msgid "No backend config with id %(sm_backend_id)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4751
-#, python-format
-msgid "No backend config with sr uuid %(sr_uuid)s"
+#: nova/db/api.py:1458
+msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4785
-msgid "Flavor exists"
+#: nova/db/api.py:1602
+msgid "Failed to notify cells of instance fault"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4800
+#: nova/db/sqlalchemy/api.py:182 nova/virt/baremetal/db/sqlalchemy/api.py:61
#, python-format
-msgid "%(sm_flavor_id) flavor not found"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4819
-#, python-format
-msgid "No sm_flavor called %(sm_flavor_id)s"
+msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4836
+#: nova/db/sqlalchemy/api.py:1374
#, python-format
-msgid "No sm_flavor called %(sm_flavor_label)s"
+msgid ""
+"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
+"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:4874
+#: nova/db/sqlalchemy/api.py:2736
#, python-format
-msgid "No sm_volume with id %(volume_id)s"
+msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
-#: nova/db/sqlalchemy/migration.py:74
+#: nova/db/sqlalchemy/migration.py:71
+#: nova/virt/baremetal/db/sqlalchemy/migration.py:69
msgid "version should be an integer"
msgstr ""
-#: nova/db/sqlalchemy/migration.py:101
+#: nova/db/sqlalchemy/migration.py:98
+#: nova/virt/baremetal/db/sqlalchemy/migration.py:96
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/session.py:316
-#, python-format
-msgid "SQL connection failed. %s attempts left."
+#: nova/db/sqlalchemy/session.py:384
+msgid "DB exception wrapped."
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:50
-#: nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py:60
-#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:53
-#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:86
-#: nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py:74
-#: nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py:97
-#: nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py:52
+#: nova/db/sqlalchemy/session.py:434
#, python-format
-msgid "Table |%s| not created!"
-msgstr ""
-
-#: nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:62
-msgid "quota_classes table not dropped"
-msgstr ""
-
-#: nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py:44
-#: nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py:67
-#: nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py:43
-#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:87
-#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:56
-#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:87
-msgid "foreign key constraint couldn't be removed"
+msgid "Got mysql server has gone away: %s"
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:84
-#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:128
-#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:178
-#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:236
-msgid "Foreign Key constraint couldn't be removed"
+#: nova/db/sqlalchemy/session.py:475
+msgid "Using mysql/eventlet db_pool."
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py:57
-msgid "foreign key could not be dropped"
-msgstr ""
-
-#: nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py:91
-msgid "foreign key could not be created"
-msgstr ""
-
-#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:98
-msgid "quota_usages table not dropped"
+#: nova/db/sqlalchemy/session.py:519
+#, python-format
+msgid "SQL connection failed. %s attempts left."
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:105
-msgid "reservations table not dropped"
+#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:62
+msgid "Exception while seeding instance_types table"
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py:45
-#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:66
-#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:107
-#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:66
-#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:107
-msgid "foreign key constraint couldn't be created"
+#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:927
+msgid "Exception while creating table."
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py:66
-msgid "foreign key constraint couldn't be dropped"
+#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:1225
+msgid "Downgrade from Folsom is unsupported."
msgstr ""
-#: nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py:93
-#: nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py:43
-msgid "user_quotas table not dropped"
+#: nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py:68
+msgid "volume_usage_cache table not dropped"
msgstr ""
-#: nova/image/glance.py:143
+#: nova/image/glance.py:159
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
"%(extra)s."
msgstr ""
-#: nova/image/s3.py:311
+#: nova/image/s3.py:312
#, python-format
msgid "Failed to download %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:328
+#: nova/image/s3.py:329
#, python-format
msgid "Failed to decrypt %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:338
+#: nova/image/s3.py:339
#, python-format
msgid "Failed to untar %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:348
+#: nova/image/s3.py:349
#, python-format
msgid "Failed to upload %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:372
+#: nova/image/s3.py:373
#, python-format
msgid "Failed to decrypt private key: %s"
msgstr ""
-#: nova/image/s3.py:379
+#: nova/image/s3.py:380
#, python-format
msgid "Failed to decrypt initialization vector: %s"
msgstr ""
-#: nova/image/s3.py:390
+#: nova/image/s3.py:391
#, python-format
msgid "Failed to decrypt image file %(image_file)s: %(err)s"
msgstr ""
-#: nova/image/s3.py:402
+#: nova/image/s3.py:403
msgid "Unsafe filenames in image"
msgstr ""
@@ -3999,178 +4142,237 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/network/api.py:54
+#: nova/network/api.py:50
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
-#: nova/network/api.py:81
+#: nova/network/api.py:76
+msgid "Failed storing info cache"
+msgstr ""
+
+#: nova/network/api.py:77
#, python-format
msgid "args: %s"
msgstr ""
-#: nova/network/api.py:82
+#: nova/network/api.py:78
#, python-format
msgid "kwargs: %s"
msgstr ""
-#: nova/network/api.py:170
+#: nova/network/api.py:171
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
-#: nova/network/ldapdns.py:317
+#: nova/network/driver.py:39
+msgid "Network driver option required, but not specified"
+msgstr ""
+
+#: nova/network/driver.py:42
+#, python-format
+msgid "Loading network driver '%s'"
+msgstr ""
+
+#: nova/network/ldapdns.py:98
+#, python-format
+msgid ""
+"Found multiple matches for domain %(domain)s.\n"
+"%(entry)s"
+msgstr ""
+
+#: nova/network/ldapdns.py:125
+#, python-format
+msgid "Unable to dequalify. %(name)s is not in %(domain)s.\n"
+msgstr ""
+
+#: nova/network/ldapdns.py:320
msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/linux_net.py:179
+#: nova/network/ldapdns.py:363 nova/network/minidns.py:169
+msgid "This shouldn't be getting called except during testing."
+msgstr ""
+
+#: nova/network/linux_net.py:190
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:214
+#: nova/network/linux_net.py:225
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:239
+#: nova/network/linux_net.py:250
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:374
+#: nova/network/linux_net.py:387
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:580
+#: nova/network/linux_net.py:593
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:790
+#: nova/network/linux_net.py:849
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:830
+#: nova/network/linux_net.py:894
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:832
+#: nova/network/linux_net.py:896
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:895
+#: nova/network/linux_net.py:961
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:897
+#: nova/network/linux_net.py:963
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1127
+#: nova/network/linux_net.py:1100
+#, python-format
+msgid "Error clearing stale veth %s"
+msgstr ""
+
+#: nova/network/linux_net.py:1223
#, python-format
msgid "Starting VLAN inteface %s"
msgstr ""
-#: nova/network/linux_net.py:1163
+#: nova/network/linux_net.py:1254
+#, python-format
+msgid "Failed unplugging VLAN interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1257
#, python-format
-msgid "Starting Bridge interface for %s"
+msgid "Unplugged VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1206
+#: nova/network/linux_net.py:1279
+#, python-format
+msgid "Starting Bridge %s"
+msgstr ""
+
+#: nova/network/linux_net.py:1291
+#, python-format
+msgid "Adding interface %(interface)s to bridge %(bridge)s"
+msgstr ""
+
+#: nova/network/linux_net.py:1324
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1307
+#: nova/network/linux_net.py:1364
+#, python-format
+msgid "Failed unplugging bridge interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1367
+#, python-format
+msgid "Unplugged bridge interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1522
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1315
+#: nova/network/linux_net.py:1530
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1334
+#: nova/network/linux_net.py:1549
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1336
+#: nova/network/linux_net.py:1551
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:285
+#: nova/network/manager.py:326
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:294 nova/network/manager.py:553
+#: nova/network/manager.py:335 nova/network/manager.py:606
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:309
+#: nova/network/manager.py:350
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:373
+#: nova/network/manager.py:414
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:391
+#: nova/network/manager.py:432
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:395
+#: nova/network/manager.py:436
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:416
+#: nova/network/manager.py:457
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:477
+#: nova/network/manager.py:519
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:674
+#: nova/network/manager.py:753
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:681
+#: nova/network/manager.py:760
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:707
+#: nova/network/manager.py:790
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:715
+#: nova/network/manager.py:798
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:762
+#: nova/network/manager.py:845
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4178,39 +4380,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:808
+#: nova/network/manager.py:891
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:818
+#: nova/network/manager.py:901
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:932
+#: nova/network/manager.py:1017
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:936
+#: nova/network/manager.py:1021
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1050
+#: nova/network/manager.py:1148
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1055
+#: nova/network/manager.py:1153
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1085
+#: nova/network/manager.py:1189
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1308
+#: nova/network/manager.py:1419
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4218,152 +4420,161 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1389
+#: nova/network/manager.py:1507
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1410
+#: nova/network/manager.py:1526
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1414
+#: nova/network/manager.py:1530
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1422
+#: nova/network/manager.py:1538
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1427
+#: nova/network/manager.py:1543
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1431
+#: nova/network/manager.py:1547
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1434
+#: nova/network/manager.py:1550
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1453
+#: nova/network/manager.py:1569
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1477
+#: nova/network/manager.py:1593
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1497
+#: nova/network/manager.py:1613
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1578
+#: nova/network/manager.py:1694
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1581
+#: nova/network/manager.py:1697
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1592
+#: nova/network/manager.py:1708
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1651
+#: nova/network/manager.py:1765
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1671
+#: nova/network/manager.py:1785
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2137
+#: nova/network/manager.py:2308
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2144
+#: nova/network/manager.py:2315
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
" is %(network_size)s"
msgstr ""
-#: nova/network/minidns.py:65
-msgid "This driver only supports type 'a'"
+#: nova/network/minidns.py:46
+#, python-format
+msgid "minidns file is |%s|"
msgstr ""
-#: nova/network/model.py:337
-msgid "v4 subnets are required for legacy nw_info"
+#: nova/network/minidns.py:73 nova/network/minidns.py:104
+msgid "Invalid name"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:75
-msgid "Error creating network entry"
+#: nova/network/minidns.py:76
+msgid "This driver only supports type 'a'"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:90
+#: nova/network/minidns.py:120
#, python-format
-msgid "No network with net_id = %s"
+msgid "Cannot delete entry |%s|"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:256
+#: nova/network/minidns.py:206
#, python-format
-msgid "No fixed IPs to deallocate for vif %s"
+msgid "Cannot delete domain |%s|"
msgstr ""
-#: nova/network/quantumv2/__init__.py:41
+#: nova/network/model.py:339
+msgid "v4 subnets are required for legacy nw_info"
+msgstr ""
+
+#: nova/network/quantumv2/__init__.py:40
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:105
+#: nova/network/quantumv2/api.py:102
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:108
+#: nova/network/quantumv2/api.py:105
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:161
+#: nova/network/quantumv2/api.py:151
+msgid "Port not found"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:159
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:173
+#: nova/network/quantumv2/api.py:171
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:182
+#: nova/network/quantumv2/api.py:180
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:192
+#: nova/network/quantumv2/api.py:190
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:207
+#: nova/network/quantumv2/api.py:205
#, python-format
msgid "validate_networks() for %s"
msgstr ""
@@ -4373,37 +4584,37 @@ msgstr ""
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
-#: nova/openstack/common/lockutils.py:98
+#: nova/openstack/common/lockutils.py:97
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/lockutils.py:184
+#: nova/openstack/common/lockutils.py:183
#, python-format
msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:188
+#: nova/openstack/common/lockutils.py:187
#, python-format
msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:216
+#: nova/openstack/common/lockutils.py:215
#, python-format
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:177
+#: nova/openstack/common/log.py:168
#, python-format
msgid "Deprecated Config: %s"
msgstr ""
-#: nova/openstack/common/log.py:309
+#: nova/openstack/common/log.py:300
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
-#: nova/openstack/common/log.py:467
+#: nova/openstack/common/log.py:458
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
@@ -4440,7 +4651,11 @@ msgstr ""
msgid "Failed to load notifier %s. These notifications will not be sent."
msgstr ""
-#: nova/openstack/common/notifier/rabbit_notifier.py:45
+#: nova/openstack/common/notifier/rabbit_notifier.py:27
+msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
+msgstr ""
+
+#: nova/openstack/common/notifier/rpc_notifier.py:45
#, python-format
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
msgstr ""
@@ -4450,147 +4665,162 @@ msgstr ""
msgid "Failed to load plugin %(plug)s: %(exc)s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:209
+#: nova/openstack/common/rpc/amqp.py:58
+msgid "Pool creating new connection"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:210
#, python-format
msgid "unpacked context: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:253
+#: nova/openstack/common/rpc/amqp.py:254
#, python-format
msgid "received %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:259
+#: nova/openstack/common/rpc/amqp.py:260
#, python-format
msgid "no method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:260
+#: nova/openstack/common/rpc/amqp.py:261
#, python-format
msgid "No method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:352
+#: nova/openstack/common/rpc/amqp.py:287
+#: nova/openstack/common/rpc/impl_zmq.py:263
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:293
+#: nova/openstack/common/rpc/impl_zmq.py:269
+msgid "Exception during message handling"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:359
#, python-format
msgid "Making asynchronous call on %s ..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:355
+#: nova/openstack/common/rpc/amqp.py:362
#, python-format
msgid "MSG_ID is %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:377
+#: nova/openstack/common/rpc/amqp.py:384
#, python-format
msgid "Making asynchronous cast on %s..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:385
+#: nova/openstack/common/rpc/amqp.py:392
msgid "Making asynchronous fanout cast..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:411
+#: nova/openstack/common/rpc/amqp.py:417
#, python-format
msgid "Sending %(event_type)s on %(topic)s"
msgstr ""
-#: nova/openstack/common/rpc/common.py:34
+#: nova/openstack/common/rpc/common.py:35
msgid "An unknown RPC related exception occurred."
msgstr ""
-#: nova/openstack/common/rpc/common.py:64
+#: nova/openstack/common/rpc/common.py:65
#, python-format
msgid ""
"Remote error: %(exc_type)s %(value)s\n"
"%(traceback)s."
msgstr ""
-#: nova/openstack/common/rpc/common.py:81
+#: nova/openstack/common/rpc/common.py:82
msgid "Timeout while waiting on RPC response."
msgstr ""
-#: nova/openstack/common/rpc/common.py:85
+#: nova/openstack/common/rpc/common.py:86
msgid "Invalid reuse of an RPC connection."
msgstr ""
-#: nova/openstack/common/rpc/common.py:89
+#: nova/openstack/common/rpc/common.py:90
#, python-format
msgid "Specified RPC version, %(version)s, not supported by this endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:206
+#: nova/openstack/common/rpc/common.py:208
#, python-format
msgid "Returning exception %s to caller"
msgstr ""
#: nova/openstack/common/rpc/impl_kombu.py:168
-#: nova/openstack/common/rpc/impl_qpid.py:144
+#: nova/openstack/common/rpc/impl_qpid.py:129
msgid "Failed to process message... skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:468
+#: nova/openstack/common/rpc/impl_kombu.py:469
#, python-format
msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:490
+#: nova/openstack/common/rpc/impl_kombu.py:491
#, python-format
msgid "Connected to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:527
+#: nova/openstack/common/rpc/impl_kombu.py:528
#, python-format
msgid ""
"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
"%(max_retries)d tries: %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:543
+#: nova/openstack/common/rpc/impl_kombu.py:544
#, python-format
msgid ""
"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
"again in %(sleep_time)d seconds."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:595
-#: nova/openstack/common/rpc/impl_qpid.py:403
+#: nova/openstack/common/rpc/impl_kombu.py:596
+#: nova/openstack/common/rpc/impl_qpid.py:386
#, python-format
msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:613
-#: nova/openstack/common/rpc/impl_qpid.py:418
+#: nova/openstack/common/rpc/impl_kombu.py:614
+#: nova/openstack/common/rpc/impl_qpid.py:401
#, python-format
msgid "Timed out waiting for RPC response: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:617
-#: nova/openstack/common/rpc/impl_qpid.py:422
+#: nova/openstack/common/rpc/impl_kombu.py:618
+#: nova/openstack/common/rpc/impl_qpid.py:405
#, python-format
msgid "Failed to consume message from queue: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:651
-#: nova/openstack/common/rpc/impl_qpid.py:452
+#: nova/openstack/common/rpc/impl_kombu.py:652
+#: nova/openstack/common/rpc/impl_qpid.py:435
#, python-format
msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:359
+#: nova/openstack/common/rpc/impl_qpid.py:336
#, python-format
-msgid "Unable to connect to AMQP server: %s"
+msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:364
+#: nova/openstack/common/rpc/impl_qpid.py:342
#, python-format
msgid "Connected to AMQP server on %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:372
+#: nova/openstack/common/rpc/impl_qpid.py:355
msgid "Re-established AMQP queues"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:430
+#: nova/openstack/common/rpc/impl_qpid.py:413
msgid "Error processing message. Skipping it."
msgstr ""
@@ -4640,117 +4870,117 @@ msgstr ""
msgid "Running func with context: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:276
+#: nova/openstack/common/rpc/impl_zmq.py:283
msgid "Sending reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:344
+#: nova/openstack/common/rpc/impl_zmq.py:351
msgid "Registering reactor"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:356
+#: nova/openstack/common/rpc/impl_zmq.py:363
msgid "In reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:371
+#: nova/openstack/common/rpc/impl_zmq.py:378
msgid "Out reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:375
+#: nova/openstack/common/rpc/impl_zmq.py:382
msgid "Consuming socket"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:422
+#: nova/openstack/common/rpc/impl_zmq.py:429
#, python-format
msgid "CONSUMER GOT %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:432
+#: nova/openstack/common/rpc/impl_zmq.py:439
#, python-format
msgid "->response->%s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:442
+#: nova/openstack/common/rpc/impl_zmq.py:449
#, python-format
msgid "Created topic proxy: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:449
+#: nova/openstack/common/rpc/impl_zmq.py:456
#, python-format
msgid "ROUTER RELAY-OUT START %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:451
+#: nova/openstack/common/rpc/impl_zmq.py:458
#, python-format
msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:467
+#: nova/openstack/common/rpc/impl_zmq.py:474
#, python-format
msgid "CONSUMER RECEIVED DATA: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:469
+#: nova/openstack/common/rpc/impl_zmq.py:476
#, python-format
msgid "ROUTER RELAY-OUT %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:495
+#: nova/openstack/common/rpc/impl_zmq.py:502
#, python-format
msgid "Create Consumer for topic (%(topic)s)"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:511
+#: nova/openstack/common/rpc/impl_zmq.py:518
#, python-format
msgid "Consumer is a zmq.%s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:554
+#: nova/openstack/common/rpc/impl_zmq.py:561
msgid "Creating payload"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:567
+#: nova/openstack/common/rpc/impl_zmq.py:574
msgid "Creating queue socket for reply waiter"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:578
+#: nova/openstack/common/rpc/impl_zmq.py:585
msgid "Sending cast"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:581
+#: nova/openstack/common/rpc/impl_zmq.py:588
msgid "Cast sent; Waiting reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:584
+#: nova/openstack/common/rpc/impl_zmq.py:591
#, python-format
msgid "Received message: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:585
+#: nova/openstack/common/rpc/impl_zmq.py:592
msgid "Unpacking response"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:612
+#: nova/openstack/common/rpc/impl_zmq.py:619
#, python-format
msgid "%(msg)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:615
+#: nova/openstack/common/rpc/impl_zmq.py:622
#, python-format
msgid "Sending message(s) to: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:619
+#: nova/openstack/common/rpc/impl_zmq.py:626
msgid "No matchmaker results. Not casting."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:709
+#: nova/openstack/common/rpc/impl_zmq.py:716
msgid ""
"Matchmaker could not be loaded.\n"
"rpc_zmq_matchmaker is not a class."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:711
+#: nova/openstack/common/rpc/impl_zmq.py:718
msgid "Error loading Matchmaker."
msgstr ""
@@ -4772,122 +5002,117 @@ msgstr ""
msgid "Could not find another compute"
msgstr ""
-#: nova/scheduler/driver.py:66
+#: nova/scheduler/driver.py:61
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:70 nova/scheduler/manager.py:185
+#: nova/scheduler/driver.py:65 nova/scheduler/manager.py:186
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:112
-#, python-format
-msgid "Casted '%(method)s' to compute '%(host)s'"
-msgstr ""
-
-#: nova/scheduler/driver.py:127
-#, python-format
-msgid "Casted '%(method)s' to %(topic)s '%(host)s'"
-msgstr ""
-
-#: nova/scheduler/driver.py:175
+#: nova/scheduler/driver.py:142
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:183
+#: nova/scheduler/driver.py:150
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:315
+#: nova/scheduler/driver.py:282
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:57
+#: nova/scheduler/filter_scheduler.py:52
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:192
+#: nova/scheduler/filter_scheduler.py:190
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:219
+#: nova/scheduler/filter_scheduler.py:207
+#, python-format
+msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:236
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:230
-msgid "Scheduler only understands Compute nodes (for now)"
-msgstr ""
-
-#: nova/scheduler/filter_scheduler.py:282
+#: nova/scheduler/filter_scheduler.py:290
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:292
+#: nova/scheduler/filter_scheduler.py:295
#, python-format
-msgid "Weighted %(weighted_host)s"
+msgid "Choosing host %(best_host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:247
+#: nova/scheduler/host_manager.py:305
#, python-format
-msgid "Host filter fails for ignored host %(host)s"
+msgid "Host filter ignoring hosts: %(ignored_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:254
+#: nova/scheduler/host_manager.py:314
#, python-format
-msgid "Host filter fails for non-forced host %(host)s"
+msgid ""
+"No hosts matched due to not matching 'force_hosts'value of "
+"'%(forced_hosts_str)s'"
msgstr ""
-#: nova/scheduler/host_manager.py:260
+#: nova/scheduler/host_manager.py:319
#, python-format
-msgid "Host filter function %(func)s failed for %(host)s"
+msgid "Host filter forcing available hosts to %(forced_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:266
+#: nova/scheduler/host_manager.py:348
#, python-format
-msgid "Host filter passes for %(host)s"
+msgid "Ignoring %(service_name)s service update from %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:329
+#: nova/scheduler/host_manager.py:353
#, python-format
-msgid "Received %(service_name)s service update from %(host)s."
+msgid "Received %(service_name)s service update from %(state_key)s."
msgstr ""
-#: nova/scheduler/host_manager.py:352
-msgid "host_manager only implemented for 'compute'"
-msgstr ""
-
-#: nova/scheduler/host_manager.py:360
+#: nova/scheduler/host_manager.py:371
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:169
+#: nova/scheduler/manager.py:170
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
-#: nova/scheduler/scheduler_options.py:70
+#: nova/scheduler/scheduler_options.py:68
#, python-format
msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
msgstr ""
-#: nova/scheduler/scheduler_options.py:79
+#: nova/scheduler/scheduler_options.py:77
#, python-format
msgid "Could not decode scheduler options: '%(e)s'"
msgstr ""
+#: nova/scheduler/filters/__init__.py:55
+msgid ""
+"Use 'nova.scheduler.filters.all_filters' instead of "
+"'nova.scheduler.filters.standard_filters'"
+msgstr ""
+
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:49
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:56
-#: nova/scheduler/filters/compute_capabilities_filter.py:48
+#: nova/scheduler/filters/compute_capabilities_filter.py:57
#, python-format
msgid "%(host_state)s fails instance_type extra_specs requirements"
msgstr ""
@@ -4902,11 +5127,11 @@ msgstr ""
msgid "%(host_state)s is disabled via capabilities"
msgstr ""
-#: nova/scheduler/filters/core_filter.py:46
+#: nova/scheduler/filters/core_filter.py:44
msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
-#: nova/scheduler/filters/disk_filter.py:48
+#: nova/scheduler/filters/disk_filter.py:46
#, python-format
msgid ""
"%(host_state)s does not have %(requested_disk)s MB usable disk, it only "
@@ -4939,37 +5164,108 @@ msgstr ""
msgid "%(host_state)s does not support requested instance_properties"
msgstr ""
-#: nova/scheduler/filters/io_ops_filter.py:43
+#: nova/scheduler/filters/io_ops_filter.py:41
#, python-format
msgid ""
"%(host_state)s fails I/O ops check: Max IOs per host is set to "
"%(max_io_ops)s"
msgstr ""
-#: nova/scheduler/filters/num_instances_filter.py:40
+#: nova/scheduler/filters/num_instances_filter.py:38
#, python-format
msgid ""
"%(host_state)s fails num_instances check: Max instances per host is set "
"to %(max_instances)s"
msgstr ""
-#: nova/scheduler/filters/ram_filter.py:47
+#: nova/scheduler/filters/ram_filter.py:45
#, python-format
msgid ""
"%(host_state)s does not have %(requested_ram)s MB usable ram, it only has"
" %(usable_ram)s MB usable ram."
msgstr ""
-#: nova/scheduler/filters/retry_filter.py:38
+#: nova/scheduler/filters/retry_filter.py:41
#, python-format
-msgid "Previously tried hosts: %(hosts)s. (host=%(host)s)"
+msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s"
msgstr ""
-#: nova/scheduler/filters/trusted_filter.py:202
+#: nova/scheduler/filters/trusted_filter.py:200
#, python-format
msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)"
msgstr ""
+#: nova/scheduler/weights/__init__.py:58
+msgid "least_cost has been deprecated in favor of the RAM Weigher."
+msgstr ""
+
+#: nova/servicegroup/api.py:48
+#, python-format
+msgid "ServiceGroup driver defined as an instance of %s"
+msgstr ""
+
+#: nova/servicegroup/api.py:54
+#, python-format
+msgid "unknown ServiceGroup driver name: %s"
+msgstr ""
+
+#: nova/servicegroup/api.py:70
+#, python-format
+msgid ""
+"Join new ServiceGroup member %(member_id)s to the %(group_id)s group, "
+"service = %(service)s"
+msgstr ""
+
+#: nova/servicegroup/api.py:77
+#, python-format
+msgid "Check if the given member [%s] is part of the ServiceGroup, is up"
+msgstr ""
+
+#: nova/servicegroup/api.py:86
+#, python-format
+msgid ""
+"Explicitly remove the given member %(member_id)s from the%(group_id)s "
+"group monitoring"
+msgstr ""
+
+#: nova/servicegroup/api.py:93
+#, python-format
+msgid "Returns ALL members of the [%s] ServiceGroup"
+msgstr ""
+
+#: nova/servicegroup/api.py:101
+#, python-format
+msgid "Returns one member of the [%s] group"
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:36
+#, python-format
+msgid ""
+"DB_Driver: join new ServiceGroup member %(member_id)s to the %(group_id)s"
+" group, service = %(service)s"
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:40
+msgid "service is a mandatory argument for DB based ServiceGroup driver"
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:64
+#, python-format
+msgid "DB_Driver: get_all members of the %s group"
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:81
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:96
+msgid "Recovered model server connection!"
+msgstr ""
+
+#: nova/servicegroup/db_driver.py:102
+msgid "model server went away"
+msgstr ""
+
#: nova/tests/fake_ldap.py:34
msgid "Attempted to instantiate singleton"
msgstr ""
@@ -4994,35 +5290,35 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:180 nova/volume/cinder.py:159
+#: nova/tests/fake_volume.py:182 nova/volume/cinder.py:179
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:184 nova/volume/cinder.py:162
+#: nova/tests/fake_volume.py:186 nova/volume/cinder.py:182
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:168
+#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:188
msgid "already detached"
msgstr ""
-#: nova/tests/fakelibvirt.py:849
+#: nova/tests/fakelibvirt.py:852
msgid "Please extend mock libvirt module to support flags"
msgstr ""
-#: nova/tests/fakelibvirt.py:853
+#: nova/tests/fakelibvirt.py:856
msgid "Expected a list for 'auth' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:857
+#: nova/tests/fakelibvirt.py:860
msgid "Expected a function in 'auth[0]' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:861
+#: nova/tests/fakelibvirt.py:864
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
-#: nova/tests/test_misc.py:62
+#: nova/tests/test_misc.py:59
#, python-format
msgid ""
"The following migrations are missing a downgrade:\n"
@@ -5065,12 +5361,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_xenapi.py:724
+#: nova/tests/test_xenapi.py:770
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:735
+#: nova/tests/test_xenapi.py:781
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5087,118 +5383,144 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:2996
+#: nova/tests/api/openstack/compute/test_servers.py:3097
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3001
+#: nova/tests/api/openstack/compute/test_servers.py:3102
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3006
+#: nova/tests/api/openstack/compute/test_servers.py:3107
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:56
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:53
#, python-format
msgid "_create: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:65
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:62
#, python-format
msgid "_delete: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:74
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:71
#, python-format
msgid "_get: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:84
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:81
#, python-format
msgid "_get_all: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:128
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:125
#, python-format
msgid "test_snapshot_create: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:137
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:134
#, python-format
msgid "test_snapshot_create: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:159
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:185
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:156
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:182
#, python-format
msgid "test_snapshot_create_force: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:168
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:165
#, python-format
msgid "test_snapshot_create_force: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:221
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:218
#, python-format
msgid "test_snapshot_show: resp=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:247
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:244
#, python-format
msgid "test_snapshot_detail: resp_dict=%s"
msgstr ""
-#: nova/tests/compute/test_compute.py:619
-#: nova/tests/compute/test_compute.py:637
-#: nova/tests/compute/test_compute.py:673
-#: nova/tests/compute/test_compute.py:698
-#: nova/tests/compute/test_compute.py:2373
+#: nova/tests/compute/test_compute.py:650
+#: nova/tests/compute/test_compute.py:668
+#: nova/tests/compute/test_compute.py:719
+#: nova/tests/compute/test_compute.py:746
+#: nova/tests/compute/test_compute.py:2604
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:625
-#: nova/tests/compute/test_compute.py:660
-#: nova/tests/compute/test_compute.py:686
-#: nova/tests/compute/test_compute.py:716
+#: nova/tests/compute/test_compute.py:656
+#: nova/tests/compute/test_compute.py:691
+#: nova/tests/compute/test_compute.py:734
+#: nova/tests/compute/test_compute.py:764
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1093
+#: nova/tests/compute/test_compute.py:1182
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2384
+#: nova/tests/compute/test_compute.py:2615
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:471
+#: nova/tests/compute/test_compute.py:3085
+msgid "wrong host/node"
+msgstr ""
+
+#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:552
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:192 nova/virt/hyperv/vmops.py:406
+#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:487
#, python-format
msgid "Failed to destroy vm %s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:235 nova/virt/hyperv/snapshotops.py:92
+#: nova/tests/hyperv/hypervutils.py:252 nova/virt/hyperv/snapshotops.py:92
#, python-format
msgid "Failed to get info for disk %s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:140
+#: nova/tests/hyperv/mockproxy.py:73
+#, python-format
+msgid "Couldn't find invocation num. %(c)d of attribute \"%(name)s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:92 nova/tests/hyperv/mockproxy.py:120
+#, python-format
+msgid "Couldn't find attribute \"%s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:97
+#, python-format
+msgid "Couldn't find attribute \"%(name)s\" with arguments \"%(params)s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:100
+#, python-format
+msgid ""
+"Couldn't find invocation num. %(c)d of attribute \"%(name)s\" with "
+"arguments \"%(params)s\""
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:157
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:144
+#: nova/tests/integrated/test_api_samples.py:161
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5206,25 +5528,25 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:152
+#: nova/tests/integrated/test_api_samples.py:169
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:155
+#: nova/tests/integrated/test_api_samples.py:172
#, python-format
msgid ""
"Length mismatch: %(result)s\n"
"%(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:166
+#: nova/tests/integrated/test_api_samples.py:183
#, python-format
msgid "Result: %(res_obj)s not in %(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:183
-#: nova/tests/integrated/test_api_samples.py:196
+#: nova/tests/integrated/test_api_samples.py:201
+#: nova/tests/integrated/test_api_samples.py:214
#, python-format
msgid ""
"Values do not match:\n"
@@ -5286,504 +5608,727 @@ msgstr ""
msgid "Decoding JSON: %s"
msgstr ""
-#: nova/virt/configdrive.py:77
+#: nova/virt/configdrive.py:92
#, python-format
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/firewall.py:176 nova/virt/libvirt/firewall.py:249
+#: nova/virt/driver.py:798
+msgid "Compute driver option required, but not specified"
+msgstr ""
+
+#: nova/virt/driver.py:801
+#, python-format
+msgid "Loading compute driver '%s'"
+msgstr ""
+
+#: nova/virt/driver.py:808
+#, python-format
+msgid "Unable to load the virtualization driver: %s"
+msgstr ""
+
+#: nova/virt/fake.py:195
+#, python-format
+msgid "Key '%(key)s' not in instances '%(inst)s'"
+msgstr ""
+
+#: nova/virt/firewall.py:179 nova/virt/libvirt/firewall.py:251
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/firewall.py:187
+#: nova/virt/firewall.py:190
msgid "Filters added to instance"
msgstr ""
-#: nova/virt/firewall.py:189
+#: nova/virt/firewall.py:192
msgid "Provider Firewall Rules refreshed"
msgstr ""
-#: nova/virt/firewall.py:357
+#: nova/virt/firewall.py:360
#, python-format
msgid "Adding security group rule: %r"
msgstr ""
-#: nova/virt/firewall.py:489 nova/virt/xenapi/firewall.py:80
+#: nova/virt/firewall.py:491 nova/virt/xenapi/firewall.py:75
#, python-format
msgid "Adding provider rule: %s"
msgstr ""
-#: nova/virt/images.py:114
+#: nova/virt/images.py:113
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/virt/images.py:213
+#: nova/virt/images.py:212
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:219
+#: nova/virt/images.py:218
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:230
+#: nova/virt/images.py:229
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:93
-msgid "No domains exist."
+#: nova/virt/baremetal/driver.py:90
+#, python-format
+msgid "Request for baremetal node %s sent to wrong service host"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:142
+msgid "cpu_arch is not found in instance_type_extra_specs"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:182
+msgid "Baremetal node id not supplied to driver"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:241
+#, python-format
+msgid "Failed to update state record for baremetal node %s"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:260
+#, python-format
+msgid "Delete called on non-existing instance %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:95
+#: nova/virt/baremetal/ipmi.py:83
#, python-format
-msgid "============= initial domains =========== : %s"
+msgid "pid file %s does not contain any pid"
msgstr ""
-#: nova/virt/baremetal/dom.py:99
-msgid "Building domain: to be removed"
+#: nova/virt/baremetal/ipmi.py:106
+msgid "Node id not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/dom.py:103
-msgid "Not running domain: remove"
+#: nova/virt/baremetal/ipmi.py:108
+msgid "Address not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/dom.py:111
-msgid "domain running on an unknown node: discarded"
+#: nova/virt/baremetal/ipmi.py:110
+msgid "User not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/dom.py:127
+#: nova/virt/baremetal/ipmi.py:112
+msgid "Password not supplied to IPMI"
+msgstr ""
+
+#: nova/virt/baremetal/ipmi.py:128
#, python-format
-msgid "No such domain (%s)"
+msgid "ipmitool stdout: '%(out)s', stderr: '%(err)%s'"
+msgstr ""
+
+#: nova/virt/baremetal/ipmi.py:154
+msgid "IPMI power on failed"
msgstr ""
-#: nova/virt/baremetal/dom.py:134
+#: nova/virt/baremetal/ipmi.py:176
+msgid "IPMI power off failed"
+msgstr ""
+
+#: nova/virt/baremetal/ipmi.py:186
+msgid "IPMI set next bootdev failed"
+msgstr ""
+
+#: nova/virt/baremetal/ipmi.py:191
#, python-format
-msgid "Failed power down Bare-metal node %s"
+msgid "Activate node called, but node %s is already active"
msgstr ""
-#: nova/virt/baremetal/dom.py:143
-msgid "deactivate -> activate fails"
+#: nova/virt/baremetal/utils.py:39
+#, python-format
+msgid "Failed to inject data into image %(image)s. Error: %(e)s"
msgstr ""
-#: nova/virt/baremetal/dom.py:153
-msgid "destroy_domain: no such domain"
+#: nova/virt/baremetal/utils.py:47
+#, python-format
+msgid "Failed to unlink %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:154
+#: nova/virt/baremetal/utils.py:59
#, python-format
-msgid "No such domain %s"
+msgid "Failed to create symlink from %(source)s to %(link)s"
msgstr ""
-#: nova/virt/baremetal/dom.py:161
+#: nova/virt/baremetal/vif_driver.py:37
#, python-format
-msgid "Domains: %s"
+msgid "plug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/dom.py:164
+#: nova/virt/baremetal/vif_driver.py:50
#, python-format
-msgid "After storing domains: %s"
+msgid "pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/dom.py:167
-msgid "deactivation/removing domain failed"
+#: nova/virt/baremetal/vif_driver.py:57
+#, python-format
+msgid ""
+"Baremetal node: %(id)s has no available physical interface for virtual "
+"interface %(vif_uuid)s"
msgstr ""
-#: nova/virt/baremetal/dom.py:174
-msgid "===== Domain is being created ====="
+#: nova/virt/baremetal/vif_driver.py:63
+#, python-format
+msgid "unplug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/dom.py:177
-msgid "Same domain name already exists"
+#: nova/virt/baremetal/vif_driver.py:71
+#, python-format
+msgid "pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/dom.py:179
-msgid "create_domain: before get_idle_node"
+#: nova/virt/baremetal/vif_driver.py:75
+#, python-format
+msgid "no pif for vif_uuid=%s"
msgstr ""
-#: nova/virt/baremetal/dom.py:196
+#: nova/virt/baremetal/volume_driver.py:131
#, python-format
-msgid "Created new domain: %s"
+msgid "baremetal driver was unable to delete tid %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:211
+#: nova/virt/baremetal/volume_driver.py:195
#, python-format
-msgid "Failed to boot Bare-metal node %s"
+msgid "Could not determine iscsi initiator name for instance %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:220
-msgid "No such domain exists"
+#: nova/virt/baremetal/volume_driver.py:237
+#, python-format
+msgid "No fixed PXE IP is associated to %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:224
+#: nova/virt/baremetal/volume_driver.py:270
#, python-format
-msgid "change_domain_state: to new state %s"
+msgid "detach volume could not find tid for %s"
+msgstr ""
+
+#: nova/virt/baremetal/db/sqlalchemy/api.py:164
+msgid "instance_uuid must be supplied to bm_node_set_uuid_safe"
msgstr ""
-#: nova/virt/baremetal/dom.py:231
+#: nova/virt/baremetal/db/sqlalchemy/api.py:176
#, python-format
-msgid "Stored fake domains to the file: %s"
+msgid "Failed to associate instance %(uuid)s to baremetal node %(id)s."
msgstr ""
-#: nova/virt/baremetal/dom.py:242
-msgid "domain does not exist"
+#: nova/virt/baremetal/db/sqlalchemy/api.py:284
+msgid "No more PXE IPs available"
msgstr ""
-#: nova/virt/baremetal/driver.py:116
+#: nova/virt/baremetal/db/sqlalchemy/api.py:306
+#: nova/virt/baremetal/db/sqlalchemy/api.py:347
#, python-format
-msgid "Error encountered when destroying instance '%(name)s': %(ex)s"
+msgid "Baremetal interface %s not found"
msgstr ""
-#: nova/virt/baremetal/driver.py:130
+#: nova/virt/baremetal/db/sqlalchemy/api.py:357
#, python-format
-msgid "instance %(instance_name)s: deleting instance files %(target)s"
+msgid "Baremetal interface %s already in use"
msgstr ""
-#: nova/virt/baremetal/driver.py:157
+#: nova/virt/baremetal/db/sqlalchemy/api.py:371
#, python-format
-msgid "instance %s: rebooted"
+msgid "Baremetal virtual interface %s not found"
msgstr ""
-#: nova/virt/baremetal/driver.py:161
-msgid "_wait_for_reboot failed"
+#: nova/virt/disk/api.py:127
+#, python-format
+msgid "Checking if we can resize image %(image)s. size=%(size)s, CoW=%(use_cow)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:190
+#: nova/virt/disk/api.py:133
#, python-format
-msgid "instance %s: rescued"
+msgid "Cannot resize filesystem %s to a smaller size."
msgstr ""
-#: nova/virt/baremetal/driver.py:194
-msgid "_wait_for_rescue failed"
+#: nova/virt/disk/api.py:144
+#, python-format
+msgid "Unable to mount image %(image)s with error %(error)s. Cannot resize."
msgstr ""
-#: nova/virt/baremetal/driver.py:211
-msgid "<============= spawn of baremetal =============>"
+#: nova/virt/disk/api.py:154
+#, python-format
+msgid ""
+"Unable to determine label for image %(image)s with error %(errror)s. "
+"Cannot resize."
msgstr ""
-#: nova/virt/baremetal/driver.py:224
+#: nova/virt/disk/api.py:234
+msgid "image already mounted"
+msgstr ""
+
+#: nova/virt/disk/api.py:279
#, python-format
-msgid "instance %s: is building"
+msgid ""
+"Inject data image=%(image)s key=%(key)s net=%(net)s metadata=%(metadata)s"
+" admin_password=ha-ha-not-telling-you files=%(files)s "
+"partition=%(partition)s use_cow=%(use_cow)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:230
-msgid "Key is injected but instance is not running yet"
+#: nova/virt/disk/api.py:303
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': "
+"%(errors)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:239
+#: nova/virt/disk/api.py:320
#, python-format
-msgid "instance %s: booted"
+msgid "Failed to unmount container filesystem: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:246
+#: nova/virt/disk/api.py:342
#, python-format
-msgid "~~~~~~ current state = %s ~~~~~~"
+msgid "Inject file fs=%(fs)s path=%(path)s append=%(append)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:248
+#: nova/virt/disk/api.py:351
#, python-format
-msgid "instance %s spawned successfully"
+msgid "Inject metadata fs=%(fs)s metadata=%(metadata)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:251
+#: nova/virt/disk/api.py:392
#, python-format
-msgid "instance %s:not booted"
+msgid "Inject key fs=%(fs)s key=%(key)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:254
-msgid "Baremetal assignment is overcommitted."
+#: nova/virt/disk/api.py:420
+#, python-format
+msgid "Inject key fs=%(fs)s net=%(net)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:338
+#: nova/virt/disk/api.py:446
#, python-format
-msgid "instance %s: Creating image"
+msgid "Inject admin password fs=%(fs)s admin_passwd=ha-ha-not-telling-you"
msgstr ""
-#: nova/virt/baremetal/driver.py:456
+#: nova/virt/disk/api.py:491
+msgid "Not implemented on Windows"
+msgstr ""
+
+#: nova/virt/disk/api.py:520
#, python-format
-msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
+msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/baremetal/driver.py:466
+#: nova/virt/disk/api.py:536
+#, python-format
+msgid "User %(username)s not found in shadow file."
+msgstr ""
+
+#: nova/virt/disk/mount/api.py:41
#, python-format
msgid ""
-"instance %(inst_name)s: ignoring error injecting data into image "
-"%(img_id)s (%(e)s)"
+"Instance for format imgfile=%(imgfile)s mountdir=%(mountdir)s "
+"partition=%(partition)s imgfmt=%(imgfmt)s"
+msgstr ""
+
+#: nova/virt/disk/mount/api.py:45 nova/virt/disk/mount/api.py:61
+#: nova/virt/disk/vfs/localfs.py:67
+msgid "Using LoopMount"
+msgstr ""
+
+#: nova/virt/disk/mount/api.py:50 nova/virt/disk/mount/api.py:66
+#: nova/virt/disk/vfs/localfs.py:72
+msgid "Using NbdMount"
msgstr ""
-#: nova/virt/baremetal/driver.py:512
+#: nova/virt/disk/mount/api.py:57
#, python-format
-msgid "instance %s: starting toXML method"
+msgid ""
+"Instance for device imgfile=%(imgfile)s mountdir=%(mountdir)s "
+"partition=%(partition)s device=%(device)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:515
+#: nova/virt/disk/mount/api.py:117
+msgid "Device allocation failed. Will retry in 2 seconds."
+msgstr ""
+
+#: nova/virt/disk/mount/api.py:120
+msgid "Device allocation failed after repeated retries."
+msgstr ""
+
+#: nova/virt/disk/mount/api.py:135
#, python-format
-msgid "instance %s: finished toXML method"
+msgid "Map dev %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:559 nova/virt/hyperv/hostops.py:43
-#: nova/virt/libvirt/driver.py:1988
-msgid ""
-"Cannot get the number of cpu, because this function is not implemented "
-"for this platform. This error can be safely ignored for now."
+#: nova/virt/disk/mount/api.py:140
+#, python-format
+msgid "partition search unsupported with %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:682
+#: nova/virt/disk/mount/api.py:156
#, python-format
-msgid "#### RLK: cpu_arch = %s "
+msgid "partition %s not found"
msgstr ""
-#: nova/virt/baremetal/driver.py:699
-msgid "Updating!"
+#: nova/virt/disk/mount/api.py:157
+#, python-format
+msgid "Failed to map partitions: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:726 nova/virt/hyperv/hostops.py:141
-#: nova/virt/libvirt/driver.py:3030 nova/virt/xenapi/host.py:149
-msgid "Updating host stats"
+#: nova/virt/disk/mount/api.py:179
+#, python-format
+msgid "Unmap dev %s"
msgstr ""
-#: nova/virt/baremetal/nodes.py:42
+#: nova/virt/disk/mount/api.py:187
#, python-format
-msgid "Unknown baremetal driver %(d)s"
+msgid "Mount %(dev)s on %(dir)s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:184
-msgid "free_node...."
+#: nova/virt/disk/mount/api.py:192
+#, python-format
+msgid "Failed to mount filesystem: %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:215
+#: nova/virt/disk/mount/api.py:202
#, python-format
-msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s"
+msgid "Umount %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:220
-msgid "status of node is set to 0"
+#: nova/virt/disk/mount/api.py:213
+msgid "Fail to mount, tearing back down"
msgstr ""
-#: nova/virt/baremetal/tilera.py:231
-msgid "rootfs is already removed"
+#: nova/virt/disk/mount/loop.py:33
+#, python-format
+msgid "Could not attach image to loopback: %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:263
-msgid "Before ping to the bare-metal node"
+#: nova/virt/disk/mount/loop.py:34
+#, python-format
+msgid "Loop mount error: %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:274
+#: nova/virt/disk/mount/loop.py:40
#, python-format
-msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"
+msgid "Got loop device %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:278
+#: nova/virt/disk/mount/loop.py:58
#, python-format
-msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s"
+msgid "Release loop device %s"
+msgstr ""
+
+#: nova/virt/disk/mount/nbd.py:54 nova/virt/disk/mount/nbd.py:68
+msgid "No free nbd devices"
+msgstr ""
+
+#: nova/virt/disk/mount/nbd.py:59
+msgid "nbd module not loaded"
msgstr ""
-#: nova/virt/baremetal/tilera.py:290
-msgid "Noting to do for tilera nodes: vmlinux is in CF"
+#: nova/virt/disk/mount/nbd.py:60
+msgid "nbd unavailable: module not loaded"
msgstr ""
-#: nova/virt/baremetal/tilera.py:313
-msgid "activate_node"
+#: nova/virt/disk/mount/nbd.py:85
+#, python-format
+msgid "Get nbd device %(dev)s for %(imgfile)s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:327
-msgid "Node is unknown error state."
+#: nova/virt/disk/mount/nbd.py:90
+#, python-format
+msgid "qemu-nbd error: %s"
msgstr ""
-#: nova/virt/disk/api.py:196
-msgid "no capable image handler configured"
+#: nova/virt/disk/mount/nbd.py:91 nova/virt/disk/mount/nbd.py:104
+#, python-format
+msgid "NBD mount error: %s"
msgstr ""
-#: nova/virt/disk/api.py:243
+#: nova/virt/disk/mount/nbd.py:103
#, python-format
-msgid "no disk image handler for: %s"
+msgid "nbd device %s did not show up"
msgstr ""
-#: nova/virt/disk/api.py:255
-msgid "image already mounted"
+#: nova/virt/disk/mount/nbd.py:110
+#, python-format
+msgid "Detaching from erroneous nbd device returned error: %s"
msgstr ""
-#: nova/virt/disk/api.py:321
+#: nova/virt/disk/mount/nbd.py:125
+#, python-format
+msgid "Release nbd device %s"
+msgstr ""
+
+#: nova/virt/disk/vfs/api.py:27
#, python-format
msgid ""
-"Failed to mount container filesystem '%(image)s' on '%(target)s': "
-"%(errors)s"
+"Instance for image imgfile=%(imgfile)s imgfmt=%(imgfmt)s "
+"partition=%(partition)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/api.py:32
+msgid "Trying to import guestfs"
+msgstr ""
+
+#: nova/virt/disk/vfs/api.py:39
+msgid "Using primary VFSGuestFS"
msgstr ""
-#: nova/virt/disk/api.py:338
+#: nova/virt/disk/vfs/api.py:44
+msgid "Falling back to VFSLocalFS"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:52
#, python-format
-msgid "Failed to unmount container filesystem: %s"
+msgid "Mount guest OS image %(imgfile)s partition %(part)s"
msgstr ""
-#: nova/virt/disk/api.py:371
-msgid "injected file path not valid"
+#: nova/virt/disk/vfs/guestfs.py:61
+#, python-format
+msgid "Inspecting guest OS image %s"
msgstr ""
-#: nova/virt/disk/api.py:516
-msgid "Not implemented on Windows"
+#: nova/virt/disk/vfs/guestfs.py:65
+#, python-format
+msgid "No operating system found in %s"
msgstr ""
-#: nova/virt/disk/api.py:550
+#: nova/virt/disk/vfs/guestfs.py:69
#, python-format
-msgid "User %(username)s not found in password file."
+msgid "Multi-boot OS %(roots)s"
msgstr ""
-#: nova/virt/disk/api.py:566
+#: nova/virt/disk/vfs/guestfs.py:71
#, python-format
-msgid "User %(username)s not found in shadow file."
+msgid "Multi-boot operating system found in %s"
msgstr ""
-#: nova/virt/disk/guestfs.py:41
+#: nova/virt/disk/vfs/guestfs.py:77
#, python-format
-msgid "unsupported partition: %s"
+msgid "Inspecting guest OS root filesystem %s"
msgstr ""
-#: nova/virt/disk/guestfs.py:66 nova/virt/disk/guestfs.py:80
-#: nova/virt/disk/mount.py:132
+#: nova/virt/disk/vfs/guestfs.py:82
#, python-format
-msgid "Failed to mount filesystem: %s"
+msgid "No mount points found in %(root)s of %(imgfile)s"
msgstr ""
-#: nova/virt/disk/guestfs.py:79
-msgid "unknown guestmount error"
+#: nova/virt/disk/vfs/guestfs.py:87
+#, python-format
+msgid "Mounting %(dev)s at %(dir)s"
msgstr ""
-#: nova/virt/disk/guestfs.py:119
+#: nova/virt/disk/vfs/guestfs.py:92
#, python-format
-msgid "Failed to umount image at %s, guestmount was still running after 10s"
+msgid "Setting up appliance for %(imgfile)s %(imgfmt)s"
msgstr ""
-#: nova/virt/disk/loop.py:31
+#: nova/virt/disk/vfs/guestfs.py:106
#, python-format
-msgid "Could not attach image to loopback: %s"
+msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:113
+msgid "Tearing down appliance"
msgstr ""
-#: nova/virt/disk/mount.py:83
+#: nova/virt/disk/vfs/guestfs.py:117
#, python-format
-msgid "partition search unsupported with %s"
+msgid "Failed to close augeas %s"
msgstr ""
-#: nova/virt/disk/mount.py:99
+#: nova/virt/disk/vfs/guestfs.py:121
#, python-format
-msgid "partition %s not found"
+msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/mount.py:100
+#: nova/virt/disk/vfs/guestfs.py:125
#, python-format
-msgid "Failed to map partitions: %s"
+msgid "Failed to close guest handle %s"
msgstr ""
-#: nova/virt/disk/nbd.py:59
-msgid "nbd unavailable: module not loaded"
+#: nova/virt/disk/vfs/guestfs.py:135 nova/virt/disk/vfs/localfs.py:102
+#, python-format
+msgid "Make directory path=%(path)s"
msgstr ""
-#: nova/virt/disk/nbd.py:64
-msgid "No free nbd devices"
+#: nova/virt/disk/vfs/guestfs.py:140 nova/virt/disk/vfs/localfs.py:107
+#, python-format
+msgid "Append file path=%(path)s"
msgstr ""
-#: nova/virt/disk/nbd.py:86
+#: nova/virt/disk/vfs/guestfs.py:145 nova/virt/disk/vfs/localfs.py:116
#, python-format
-msgid "qemu-nbd error: %s"
+msgid "Replace file path=%(path)s"
msgstr ""
-#: nova/virt/disk/nbd.py:98
+#: nova/virt/disk/vfs/guestfs.py:150 nova/virt/disk/vfs/localfs.py:125
#, python-format
-msgid "nbd device %s did not show up"
+msgid "Read file path=%(path)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:155 nova/virt/disk/vfs/localfs.py:131
+#, python-format
+msgid "Has file path=%(path)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:164
+#, python-format
+msgid "Set permissions path=%(path)s mode=%(mode)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:169
+#, python-format
+msgid "Set ownership path=%(path)s user=%(user)s group=%(group)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:182
+#, python-format
+msgid "chown uid=%(uid)d gid=%(gid)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/localfs.py:46
+#, python-format
+msgid "File path %s not valid"
msgstr ""
-#: nova/virt/hyperv/driver.py:190 nova/virt/hyperv/driver.py:193
+#: nova/virt/disk/vfs/localfs.py:80
+#, python-format
+msgid "Failed to mount image %(ex)s)"
+msgstr ""
+
+#: nova/virt/disk/vfs/localfs.py:90
+#, python-format
+msgid "Failed to unmount %(imgdir)s: %(ex)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/localfs.py:96
+#, python-format
+msgid "Failed to remove %(imgdir)s: %(ex)s"
+msgstr ""
+
+#: nova/virt/disk/vfs/localfs.py:139
+#, python-format
+msgid "Set permissions path=%(path)s mode=%(mode)o"
+msgstr ""
+
+#: nova/virt/disk/vfs/localfs.py:144
+#, python-format
+msgid "Set permissions path=%(path)s user=%(user)s group=%(group)s"
+msgstr ""
+
+#: nova/virt/hyperv/basevolumeutils.py:55
+msgid "The ISCSI initiator name can't be found. Choosing the default one"
+msgstr ""
+
+#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1421
+#: nova/virt/xenapi/vm_utils.py:504
+#, python-format
+msgid "block_device_list %s"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:183 nova/virt/hyperv/driver.py:186
msgid "plug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:196
+#: nova/virt/hyperv/driver.py:189
msgid "ensure_filtering_rules_for_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:201
+#: nova/virt/hyperv/driver.py:194
msgid "unfilter_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:205
+#: nova/virt/hyperv/driver.py:198
msgid "confirm_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:210
+#: nova/virt/hyperv/driver.py:203
msgid "finish_revert_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:216
+#: nova/virt/hyperv/driver.py:209
msgid "finish_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:219
+#: nova/virt/hyperv/driver.py:212
msgid "get_console_output called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:112
+#: nova/virt/hyperv/hostops.py:78
+msgid ""
+"Cannot get the number of cpu, because this function is not implemented "
+"for this platform. This error can be safely ignored for now."
+msgstr ""
+
+#: nova/virt/hyperv/hostops.py:134 nova/virt/hyperv/volumeops.py:85
#, python-format
msgid "Windows version: %s "
msgstr ""
-#: nova/virt/hyperv/hostops.py:124
+#: nova/virt/hyperv/hostops.py:146
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:161
+#: nova/virt/hyperv/hostops.py:163 nova/virt/libvirt/driver.py:3105
+#: nova/virt/xenapi/host.py:149
+msgid "Updating host stats"
+msgstr ""
+
+#: nova/virt/hyperv/hostops.py:183
msgid "get_host_stats called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:52
+#: nova/virt/hyperv/livemigrationops.py:53
msgid ""
"Live migration is not supported \" \"by this version "
"of Hyper-V"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:61
+#: nova/virt/hyperv/livemigrationops.py:62
msgid "Live migration is not enabled on this host"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:64
+#: nova/virt/hyperv/livemigrationops.py:65
msgid "Live migration networks are not configured on this host"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:68
+#: nova/virt/hyperv/livemigrationops.py:69
msgid "live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:94
+#: nova/virt/hyperv/livemigrationops.py:95
#, python-format
msgid "Getting live migration networks for remote host: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:113
+#: nova/virt/hyperv/livemigrationops.py:114
#, python-format
msgid "Starting live migration for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:126
+#: nova/virt/hyperv/livemigrationops.py:127
#, python-format
msgid "Failed to live migrate VM %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:129
+#: nova/virt/hyperv/livemigrationops.py:130
#, python-format
msgid "Calling live migration recover_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:133
+#: nova/virt/hyperv/livemigrationops.py:134
#, python-format
msgid "Calling live migration post_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:139
+#: nova/virt/hyperv/livemigrationops.py:140
msgid "pre_live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:157
+#: nova/virt/hyperv/livemigrationops.py:158
msgid "post_live_migration_at_destination called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:161
+#: nova/virt/hyperv/livemigrationops.py:162
#, python-format
msgid "compare_cpu called %s"
msgstr ""
@@ -5866,188 +6411,207 @@ msgstr ""
msgid "Removing folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:67
+#: nova/virt/hyperv/vmops.py:80
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:91
+#: nova/virt/hyperv/vmops.py:103
#, python-format
msgid "hyperv vm state: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:97
+#: nova/virt/hyperv/vmops.py:109
#, python-format
msgid ""
-"Got Info for vm %(instance_name)s: state=%(state)s, mem=%(memusage)s, "
+"Got Info for vm %(instance_name)s: state=%(state)d, mem=%(memusage)s, "
"num_cpu=%(numprocs)s, uptime=%(uptime)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:133
+#: nova/virt/hyperv/vmops.py:144
#, python-format
msgid "cache image failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:152
+#: nova/virt/hyperv/vmops.py:168
#, python-format
msgid "Starting VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:154
+#: nova/virt/hyperv/vmops.py:170
#, python-format
msgid "Started VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:156
+#: nova/virt/hyperv/vmops.py:172
#, python-format
msgid "spawn vm failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:175
+#: nova/virt/hyperv/vmops.py:178
+#, python-format
+msgid "Invalid config_drive_format \"%s\""
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:181 nova/virt/libvirt/driver.py:1362
+msgid "Using config drive"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:192 nova/virt/libvirt/driver.py:1371
+#, python-format
+msgid "Creating config drive at %(path)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:199 nova/virt/libvirt/driver.py:1377
+#, python-format
+msgid "Creating config drive failed with error: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:238
#, python-format
msgid "Failed to create VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:178
+#: nova/virt/hyperv/vmops.py:241
#, python-format
msgid "Created VM %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:195
+#: nova/virt/hyperv/vmops.py:258
#, python-format
msgid "Set memory for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:208
+#: nova/virt/hyperv/vmops.py:271
#, python-format
msgid "Set vcpus for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:212
+#: nova/virt/hyperv/vmops.py:275
#, python-format
msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
msgstr ""
-#: nova/virt/hyperv/vmops.py:221
+#: nova/virt/hyperv/vmops.py:284
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmops.py:229
+#: nova/virt/hyperv/vmops.py:292
#, python-format
msgid "Failed to add scsi controller to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:234
+#: nova/virt/hyperv/vmops.py:309
#, python-format
-msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s"
+msgid "Creating disk for %(vm_name)s by attaching disk file %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:261
+#: nova/virt/hyperv/vmops.py:337
#, python-format
-msgid "Failed to add diskdrive to VM %s"
+msgid "Failed to add drive to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:264
+#: nova/virt/hyperv/vmops.py:340
#, python-format
-msgid "New disk drive path is %s"
+msgid "New %(drive_type)s drive path is %(drive_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:283
+#: nova/virt/hyperv/vmops.py:365
#, python-format
-msgid "Failed to add vhd file to VM %s"
+msgid "Failed to add %(drive_type)s image to VM %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:285
+#: nova/virt/hyperv/vmops.py:367
#, python-format
-msgid "Created disk for %s"
+msgid "Created drive type %(drive_type)s for %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:289
+#: nova/virt/hyperv/vmops.py:372
#, python-format
msgid "Creating nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:294
+#: nova/virt/hyperv/vmops.py:377
msgid "Cannot find vSwitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:314
+#: nova/virt/hyperv/vmops.py:397
msgid "Failed creating a port on the external vswitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:315
+#: nova/virt/hyperv/vmops.py:398
#, python-format
msgid "Failed creating port for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:318
+#: nova/virt/hyperv/vmops.py:401
#, python-format
msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:330
+#: nova/virt/hyperv/vmops.py:413
#, python-format
msgid "Failed to add nic to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:332
+#: nova/virt/hyperv/vmops.py:415
#, python-format
msgid "Created nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:339 nova/virt/hyperv/vmops.py:342
+#: nova/virt/hyperv/vmops.py:422 nova/virt/hyperv/vmops.py:425
#, python-format
msgid "Attempting to bind NIC to %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:347
+#: nova/virt/hyperv/vmops.py:430
msgid "No vSwitch specified, attaching to default"
msgstr ""
-#: nova/virt/hyperv/vmops.py:372
+#: nova/virt/hyperv/vmops.py:453
#, python-format
msgid "Got request to destroy vm %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:416
+#: nova/virt/hyperv/vmops.py:497
#, python-format
-msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
+msgid "Del: disk %(vhdfile)s vm %(name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:422
+#: nova/virt/hyperv/vmops.py:503
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:427
+#: nova/virt/hyperv/vmops.py:508
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:433
+#: nova/virt/hyperv/vmops.py:514
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:438
+#: nova/virt/hyperv/vmops.py:519
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:443
+#: nova/virt/hyperv/vmops.py:524
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:448
+#: nova/virt/hyperv/vmops.py:529
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:468
+#: nova/virt/hyperv/vmops.py:549
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:497
+#: nova/virt/hyperv/vmops.py:578
#, python-format
msgid "use_cow_image:%s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:517
+#: nova/virt/hyperv/vmops.py:598
#, python-format
msgid "Failed to create Difference Disk from %(base)s to %(target)s"
msgstr ""
@@ -6057,193 +6621,203 @@ msgstr ""
msgid "duplicate name found: %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:68
+#: nova/virt/hyperv/vmutils.py:72
#, python-format
msgid ""
-"WMI job failed: %(ErrorSummaryDescription)s - %(ErrorDescription)s - "
-"%(ErrorCode)s"
+"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s"
+" - %(err_desc)s - Error code: %(err_code)d"
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:78
+#, python-format
+msgid "WMI job failed with status %(job_state)d. Error details: %(error)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:81
+#, python-format
+msgid "WMI job failed with status %(job_state)d. No error description available"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:73
+#: nova/virt/hyperv/vmutils.py:86
#, python-format
-msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s "
+msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:80 nova/virt/hyperv/vmutils.py:96
+#: nova/virt/hyperv/vmutils.py:93 nova/virt/hyperv/vmutils.py:113
#, python-format
msgid "Creating folder %s "
msgstr ""
-#: nova/virt/hyperv/vmutils.py:94
+#: nova/virt/hyperv/vmutils.py:111
#, python-format
msgid "Removing existing folder %s "
msgstr ""
-#: nova/virt/hyperv/volumeops.py:69 nova/virt/xenapi/vm_utils.py:483
+#: nova/virt/hyperv/volumeops.py:90 nova/virt/xenapi/vm_utils.py:511
#, python-format
msgid "block device info: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:97
+#: nova/virt/hyperv/volumeops.py:118
#, python-format
msgid "Attach boot from volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:100
+#: nova/virt/hyperv/volumeops.py:121
#, python-format
msgid "Unable to attach boot volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:109 nova/virt/xenapi/volumeops.py:114
+#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:115
#, python-format
msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:134
+#: nova/virt/hyperv/volumeops.py:155
#, python-format
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:137 nova/virt/xenapi/volumeops.py:190
+#: nova/virt/hyperv/volumeops.py:158 nova/virt/xenapi/volumeops.py:182
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:156
+#: nova/virt/hyperv/volumeops.py:177
#, python-format
msgid "Failed to add volume to VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:170
+#: nova/virt/hyperv/volumeops.py:191
#, python-format
msgid "Detach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:187
+#: nova/virt/hyperv/volumeops.py:208
#, python-format
msgid "Mounted disk to detach is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:188
+#: nova/virt/hyperv/volumeops.py:209
#, python-format
msgid "host_resource disk detached is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:191
+#: nova/virt/hyperv/volumeops.py:212
#, python-format
msgid "Physical disk detached is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:198
+#: nova/virt/hyperv/volumeops.py:219
#, python-format
msgid "Failed to remove volume from VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:605
+#: nova/virt/hyperv/volumeops.py:228 nova/virt/libvirt/driver.py:624
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:224
+#: nova/virt/hyperv/volumeops.py:245
#, python-format
msgid "device.InitiatorName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:225
+#: nova/virt/hyperv/volumeops.py:246
#, python-format
msgid "device.TargetName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:226
+#: nova/virt/hyperv/volumeops.py:247
#, python-format
msgid "device.ScsiPortNumber: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:227
+#: nova/virt/hyperv/volumeops.py:248
#, python-format
msgid "device.ScsiPathId: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:228
+#: nova/virt/hyperv/volumeops.py:249
#, python-format
msgid "device.ScsiTargetId): %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:229
+#: nova/virt/hyperv/volumeops.py:250
#, python-format
msgid "device.ScsiLun: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:230
+#: nova/virt/hyperv/volumeops.py:251
#, python-format
msgid "device.DeviceInterfaceGuid :%s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:232
+#: nova/virt/hyperv/volumeops.py:253
#, python-format
msgid "device.DeviceInterfaceName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:234
+#: nova/virt/hyperv/volumeops.py:255
#, python-format
msgid "device.LegacyName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:235
+#: nova/virt/hyperv/volumeops.py:256
#, python-format
msgid "device.DeviceType: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:236
+#: nova/virt/hyperv/volumeops.py:257
#, python-format
msgid "device.DeviceNumber %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:237
+#: nova/virt/hyperv/volumeops.py:258
#, python-format
msgid "device.PartitionNumber :%s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:243 nova/virt/hyperv/volumeops.py:262
+#: nova/virt/hyperv/volumeops.py:264 nova/virt/hyperv/volumeops.py:283
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:245
+#: nova/virt/hyperv/volumeops.py:266
#, python-format
msgid "Device number : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:246
+#: nova/virt/hyperv/volumeops.py:267
#, python-format
msgid "Target lun : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:252 nova/virt/hyperv/volumeops.py:259
+#: nova/virt/hyperv/volumeops.py:273 nova/virt/hyperv/volumeops.py:280
#, python-format
msgid "Mounted disk is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:276
+#: nova/virt/hyperv/volumeops.py:297
#, python-format
msgid "Drive number to disconnect is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:283
+#: nova/virt/hyperv/volumeops.py:304
#, python-format
msgid "DeviceNumber : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:288
+#: nova/virt/hyperv/volumeops.py:309
#, python-format
msgid "Disk path to parse: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:290
+#: nova/virt/hyperv/volumeops.py:311
#, python-format
msgid "start_device_id: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:292
+#: nova/virt/hyperv/volumeops.py:313
#, python-format
msgid "end_device_id: %s"
msgstr ""
@@ -6253,68 +6827,63 @@ msgstr ""
msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:68
-msgid "The ISCSI initiator name can't be found. Choosing the default one"
-msgstr ""
-
-#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1464
-#: nova/virt/xenapi/vm_utils.py:476
-#, python-format
-msgid "block_device_list %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:334
+#: nova/virt/libvirt/driver.py:341
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:340
+#: nova/virt/libvirt/driver.py:347
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:361
+#: nova/virt/libvirt/driver.py:368
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:383 nova/virt/libvirt/driver.py:386
+#: nova/virt/libvirt/driver.py:390 nova/virt/libvirt/driver.py:393
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:468
+#: nova/virt/libvirt/driver.py:411
+#, python-format
+msgid "Connection to libvirt failed: %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:492
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:482
+#: nova/virt/libvirt/driver.py:506
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:487
+#: nova/virt/libvirt/driver.py:511
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:509
+#: nova/virt/libvirt/driver.py:533
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:524
+#: nova/virt/libvirt/driver.py:548
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:537
+#: nova/virt/libvirt/driver.py:561
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:554
+#: nova/virt/libvirt/driver.py:578
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:568
+#: nova/virt/libvirt/driver.py:587
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
@@ -6323,153 +6892,166 @@ msgstr ""
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:740
-msgid "attaching LXC block device"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:753
-msgid "detaching LXC block device"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:885
+#: nova/virt/libvirt/driver.py:837
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:889
+#: nova/virt/libvirt/driver.py:841
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:921
+#: nova/virt/libvirt/driver.py:873
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:956
+#: nova/virt/libvirt/driver.py:909
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1086
+#: nova/virt/libvirt/driver.py:1039
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1093 nova/virt/powervm/operator.py:253
+#: nova/virt/libvirt/driver.py:1046 nova/virt/powervm/operator.py:255
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1109
+#: nova/virt/libvirt/driver.py:1062
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1155
+#: nova/virt/libvirt/driver.py:1100 nova/virt/libvirt/driver.py:1126
+#, python-format
+msgid "Truncated console log returned, %d bytes ignored"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1115
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1199
+#: nova/virt/libvirt/driver.py:1164
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1203
+#: nova/virt/libvirt/driver.py:1168
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1207 nova/virt/libvirt/driver.py:1211
+#: nova/virt/libvirt/driver.py:1172 nova/virt/libvirt/driver.py:1176
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1277
+#: nova/virt/libvirt/driver.py:1241
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1403
-msgid "Using config drive"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1413
-#, python-format
-msgid "Creating config drive at %(path)s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1427
+#: nova/virt/libvirt/driver.py:1389
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1437
+#: nova/virt/libvirt/driver.py:1399
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1511
+#: nova/virt/libvirt/driver.py:1473
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1517
+#: nova/virt/libvirt/driver.py:1479
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1483
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1525
+#: nova/virt/libvirt/driver.py:1487
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1541
+#: nova/virt/libvirt/driver.py:1503
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1833
+#: nova/virt/libvirt/driver.py:1819
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1837
+#: nova/virt/libvirt/driver.py:1823
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1854
+#: nova/virt/libvirt/driver.py:1840
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2106
+#: nova/virt/libvirt/driver.py:1994
+msgid ""
+"Cannot get the number of cpu, because this function is not implemented "
+"for this platform. "
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2111
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2293
+#: nova/virt/libvirt/driver.py:2194
+#, python-format
+msgid "Trying to get stats for the volume %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2218
+#, python-format
+msgid ""
+"Getting block stats failed, device might have been detached. "
+"Code=%(errcode)s Error=%(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2222
+#, python-format
+msgid ""
+"Could not find domain in libvirt for instance %s. Cannot get block stats "
+"for device"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2338
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2301
+#: nova/virt/libvirt/driver.py:2346
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2336
+#: nova/virt/libvirt/driver.py:2383
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2356
+#: nova/virt/libvirt/driver.py:2408
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2368
+#: nova/virt/libvirt/driver.py:2420
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6479,212 +7061,229 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2385
+#: nova/virt/libvirt/driver.py:2437
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2433
+#: nova/virt/libvirt/driver.py:2485
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2503
+#: nova/virt/libvirt/driver.py:2557
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2547
+#: nova/virt/libvirt/driver.py:2647
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2674
+#: nova/virt/libvirt/driver.py:2762
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2723
+#: nova/virt/libvirt/driver.py:2811
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2785
+#: nova/virt/libvirt/driver.py:2860
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2844
+#: nova/virt/libvirt/driver.py:2919
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2851
+#: nova/virt/libvirt/driver.py:2926
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2902
+#: nova/virt/libvirt/driver.py:2977
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/firewall.py:34
+#: nova/virt/libvirt/firewall.py:35
msgid ""
"Libvirt module could not be loaded. NWFilterFirewall will not work "
"correctly."
msgstr ""
-#: nova/virt/libvirt/firewall.py:102
+#: nova/virt/libvirt/firewall.py:104
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:110
+#: nova/virt/libvirt/firewall.py:112
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:191
+#: nova/virt/libvirt/firewall.py:193
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:214
+#: nova/virt/libvirt/firewall.py:216
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:230
+#: nova/virt/libvirt/firewall.py:232
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:213
+#: nova/virt/libvirt/imagebackend.py:207
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:276
+#: nova/virt/libvirt/imagebackend.py:270
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:166
+#: nova/virt/libvirt/imagecache.py:111
+#, python-format
+msgid "Error reading image info file %(filename)s: %(error)s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:151
+#, python-format
+msgid "Reading image info file: %s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:173
+#, python-format
+msgid "Writing stored info to %s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:294
#, python-format
msgid "%s is a valid instance name"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:169
+#: nova/virt/libvirt/imagecache.py:297
#, python-format
msgid "%s has a disk file"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:171
+#: nova/virt/libvirt/imagecache.py:299
#, python-format
msgid "Instance %(instance)s is backed by %(backing)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:184
+#: nova/virt/libvirt/imagecache.py:312
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
"appear in the image service"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:239
+#: nova/virt/libvirt/imagecache.py:388
+#, python-format
+msgid "image %(id)s at (%(base_file)s): image verification failed"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:398
#, python-format
-msgid "%(id)s (%(base_file)s): image verification failed"
+msgid ""
+"image %(id)s at (%(base_file)s): image verification skipped, no hash "
+"stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:249
+#: nova/virt/libvirt/imagecache.py:407
#, python-format
-msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgid "%(id)s (%(base_file)s): generating checksum"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:268
+#: nova/virt/libvirt/imagecache.py:422
#, python-format
msgid "Cannot remove %(base_file)s, it does not exist"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:280
+#: nova/virt/libvirt/imagecache.py:434
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:283
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:290
+#: nova/virt/libvirt/imagecache.py:444
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:301
+#: nova/virt/libvirt/imagecache.py:455
#, python-format
-msgid "%(id)s (%(base_file)s): checking"
+msgid "image %(id)s at (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:320
+#: nova/virt/libvirt/imagecache.py:479
#, python-format
msgid ""
-"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
-"on other nodes"
+"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
+"%(remote)d on other nodes sharing this instance storage"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:332
+#: nova/virt/libvirt/imagecache.py:491
#, python-format
msgid ""
-"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
-"instances: %(instance_list)s"
+"image %(id)s at (%(base_file)s): warning -- an absent base file is in "
+"use! instances: %(instance_list)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:340
+#: nova/virt/libvirt/imagecache.py:503
#, python-format
-msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgid "image %(id)s at (%(base_file)s): image is not in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:350
+#: nova/virt/libvirt/imagecache.py:510
#, python-format
-msgid "%(id)s (%(base_file)s): image is not in use"
+msgid "image %(id)s at (%(base_file)s): image is in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:356
-#, python-format
-msgid "%(id)s (%(base_file)s): image is in use"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:379
+#: nova/virt/libvirt/imagecache.py:534
#, python-format
msgid "Skipping verification, no base directory at %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:383
+#: nova/virt/libvirt/imagecache.py:538
msgid "Verify base images"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:390
+#: nova/virt/libvirt/imagecache.py:545
#, python-format
msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:408
+#: nova/virt/libvirt/imagecache.py:563
#, python-format
msgid "Unknown base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:413
+#: nova/virt/libvirt/imagecache.py:568
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:416
+#: nova/virt/libvirt/imagecache.py:571
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:420
+#: nova/virt/libvirt/imagecache.py:575
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:428
+#: nova/virt/libvirt/imagecache.py:583
msgid "Verification complete"
msgstr ""
@@ -6693,14 +7292,14 @@ msgstr ""
msgid "LVM snapshots not implemented"
msgstr ""
-#: nova/virt/libvirt/utils.py:134
+#: nova/virt/libvirt/utils.py:113
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/utils.py:143
+#: nova/virt/libvirt/utils.py:122
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -6708,75 +7307,108 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:190
+#: nova/virt/libvirt/utils.py:169
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:409
+#: nova/virt/libvirt/utils.py:435
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/utils.py:498
+#: nova/virt/libvirt/vif.py:110
#, python-format
-msgid "Reading image info file: %s"
+msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/utils.py:502
+#: nova/virt/libvirt/vif.py:120
#, python-format
-msgid "Read: %s"
+msgid "Ensuring bridge %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:197 nova/virt/libvirt/vif.py:268
+msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/utils.py:508
+#: nova/virt/libvirt/volume.py:188
#, python-format
-msgid "Error reading image info file %(filename)s: %(error)s"
+msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:532
+#: nova/virt/libvirt/volume.py:191
#, python-format
-msgid "Writing image info file: %s"
+msgid ""
+"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
+"Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/utils.py:533
+#: nova/virt/libvirt/volume.py:203
#, python-format
-msgid "Wrote: %s"
+msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/vif.py:97
+#: nova/virt/libvirt/volume_nfs.py:81
#, python-format
-msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgid "%s is already mounted"
msgstr ""
-#: nova/virt/libvirt/vif.py:107
+#: nova/virt/powervm/blockdev.py:73
#, python-format
-msgid "Ensuring bridge %s"
+msgid "Removing the logical volume '%s'"
msgstr ""
-#: nova/virt/libvirt/vif.py:183 nova/virt/libvirt/vif.py:249
-#: nova/virt/libvirt/vif.py:309
-msgid "Failed while unplugging vif"
+#: nova/virt/powervm/blockdev.py:91
+#, python-format
+msgid "Fetching image '%s' from glance"
msgstr ""
-#: nova/virt/libvirt/volume.py:176
+#: nova/virt/powervm/blockdev.py:96
#, python-format
-msgid "iSCSI device not found at %s"
+msgid "Using image found at '%s'"
msgstr ""
-#: nova/virt/libvirt/volume.py:179
+#: nova/virt/powervm/blockdev.py:98
#, python-format
-msgid ""
-"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
-"Try number: %(tries)s"
+msgid "Ensuring image '%s' exists on IVM"
msgstr ""
-#: nova/virt/libvirt/volume.py:191
+#: nova/virt/powervm/blockdev.py:109
#, python-format
-msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgid "Creating logical volume of size %s bytes"
msgstr ""
-#: nova/virt/libvirt/volume_nfs.py:81
+#: nova/virt/powervm/blockdev.py:112
#, python-format
-msgid "%s is already mounted"
+msgid "Copying image to the device '%s'"
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:115
+msgid "Error while creating logical volume from image. Will attempt cleanup."
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:122
+msgid "Error while attempting cleanup of failed deploy to logical volume."
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:174
+msgid "Could not create logical volume. No space left on any volume group."
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:245
+msgid "Unable to get checksum"
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:248
+msgid "Image checksums do not match"
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:269
+#, python-format
+msgid "Image found on host at '%s'"
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:277
+msgid "Uncompressed image file not found"
msgstr ""
#: nova/virt/powervm/common.py:54
@@ -6837,46 +7469,27 @@ msgstr ""
msgid "PowerVM LPAR instance '%(instance_name)s' cleanup failed"
msgstr ""
-#: nova/virt/powervm/operator.py:91
+#: nova/virt/powervm/operator.py:100
#, python-format
msgid "LPAR instance '%s' not found"
msgstr ""
-#: nova/virt/powervm/operator.py:174
+#: nova/virt/powervm/operator.py:184
msgid "Not enough free memory in the host"
msgstr ""
-#: nova/virt/powervm/operator.py:184
+#: nova/virt/powervm/operator.py:194
msgid "Insufficient available CPU on PowerVM"
msgstr ""
-#: nova/virt/powervm/operator.py:208
+#: nova/virt/powervm/operator.py:218
#, python-format
msgid "Creating LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:211
-#, python-format
-msgid "LPAR instance '%s' creation failed"
-msgstr ""
-
#: nova/virt/powervm/operator.py:221
#, python-format
-msgid "Fetching image '%s' from glance"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:225
-#, python-format
-msgid "Copying image '%s' to IVM"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:230
-msgid "Creating logical volume"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:235
-#, python-format
-msgid "Copying image to the device '%s'"
+msgid "LPAR instance '%s' creation failed"
msgstr ""
#: nova/virt/powervm/operator.py:238
@@ -6884,83 +7497,71 @@ msgstr ""
msgid "PowerVM image creation failed: %s"
msgstr ""
-#: nova/virt/powervm/operator.py:244
+#: nova/virt/powervm/operator.py:246
#, python-format
msgid "Activating the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:258
+#: nova/virt/powervm/operator.py:260
#, python-format
msgid "Instance '%s' failed to boot"
msgstr ""
-#: nova/virt/powervm/operator.py:275
+#: nova/virt/powervm/operator.py:272
+msgid "Error while attempting to clean up failed instance launch."
+msgstr ""
+
+#: nova/virt/powervm/operator.py:276
#, python-format
-msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
+msgid "Instance spawned in %s seconds"
msgstr ""
-#: nova/virt/powervm/operator.py:284
+#: nova/virt/powervm/operator.py:287
#, python-format
-msgid "Shutting down the instance '%s'"
+msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
msgstr ""
-#: nova/virt/powervm/operator.py:288
+#: nova/virt/powervm/operator.py:296
#, python-format
-msgid "Removing the logical volume '%s'"
+msgid "Shutting down the instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:291
+#: nova/virt/powervm/operator.py:305
#, python-format
msgid "Deleting the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:294
+#: nova/virt/powervm/operator.py:308
msgid "PowerVM instance cleanup failed"
msgstr ""
-#: nova/virt/powervm/operator.py:495
-msgid "Could not create logical volume. No space left on any volume group."
-msgstr ""
-
-#: nova/virt/powervm/operator.py:554
-msgid "Unable to get checksum"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:557
-msgid "Image checksums do not match"
-msgstr ""
-
-#: nova/virt/powervm/operator.py:582
-msgid "Uncompressed image file not found"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:111
+#: nova/virt/vmwareapi/driver.py:107
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
-"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
+"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:275
+#: nova/virt/vmwareapi/driver.py:258
#, python-format
msgid "In vmwareapi:_create_session, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:358
+#: nova/virt/vmwareapi/driver.py:341
#, python-format
msgid "In vmwareapi:_call_method, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:393
+#: nova/virt/vmwareapi/driver.py:376
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: success"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:398
+#: nova/virt/vmwareapi/driver.py:381
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:402
+#: nova/virt/vmwareapi/driver.py:385
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
@@ -7032,279 +7633,279 @@ msgstr ""
msgid "Created Port Group with name %s on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/read_write_util.py:145
+#: nova/virt/vmwareapi/read_write_util.py:142
#, python-format
-msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:84
+#: nova/virt/vmwareapi/vim.py:83
msgid "Unable to import suds."
msgstr ""
-#: nova/virt/vmwareapi/vim.py:90
+#: nova/virt/vmwareapi/vim.py:89
msgid "Must specify vmwareapi_wsdl_loc"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:145
+#: nova/virt/vmwareapi/vim.py:144
#, python-format
msgid "No such SOAP method '%s' provided by VI SDK"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:150
+#: nova/virt/vmwareapi/vim.py:149
#, python-format
msgid "httplib error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:157
+#: nova/virt/vmwareapi/vim.py:156
#, python-format
msgid "Socket error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:162
+#: nova/virt/vmwareapi/vim.py:161
#, python-format
msgid "Type error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:166
+#: nova/virt/vmwareapi/vim.py:165
#, python-format
msgid "Exception in %s "
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:66
+#: nova/virt/vmwareapi/vmops.py:60
msgid "Getting list of instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:82
+#: nova/virt/vmwareapi/vmops.py:76
#, python-format
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:126
+#: nova/virt/vmwareapi/vmops.py:120
msgid "Couldn't get a local Datastore reference"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:196
+#: nova/virt/vmwareapi/vmops.py:190
msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:204
+#: nova/virt/vmwareapi/vmops.py:198
msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:232
+#: nova/virt/vmwareapi/vmops.py:226
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:250
+#: nova/virt/vmwareapi/vmops.py:244
#, python-format
msgid ""
"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
"local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:260
+#: nova/virt/vmwareapi/vmops.py:254
#, python-format
msgid ""
"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:273
+#: nova/virt/vmwareapi/vmops.py:267
#, python-format
msgid ""
"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:285
+#: nova/virt/vmwareapi/vmops.py:279
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:301
+#: nova/virt/vmwareapi/vmops.py:295
#, python-format
msgid ""
"Downloaded image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:319
+#: nova/virt/vmwareapi/vmops.py:313
msgid "Reconfiguring VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:326
+#: nova/virt/vmwareapi/vmops.py:320
msgid "Reconfigured VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:333
+#: nova/virt/vmwareapi/vmops.py:327
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:339
+#: nova/virt/vmwareapi/vmops.py:333
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:385
+#: nova/virt/vmwareapi/vmops.py:379
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:395
+#: nova/virt/vmwareapi/vmops.py:389
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:438
+#: nova/virt/vmwareapi/vmops.py:432
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:451
+#: nova/virt/vmwareapi/vmops.py:445
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:460
+#: nova/virt/vmwareapi/vmops.py:454
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:474
+#: nova/virt/vmwareapi/vmops.py:468
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:485
+#: nova/virt/vmwareapi/vmops.py:479
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:494
+#: nova/virt/vmwareapi/vmops.py:488
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:526
+#: nova/virt/vmwareapi/vmops.py:520
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:533
+#: nova/virt/vmwareapi/vmops.py:527
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:536
+#: nova/virt/vmwareapi/vmops.py:530
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:538
+#: nova/virt/vmwareapi/vmops.py:532
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:542
+#: nova/virt/vmwareapi/vmops.py:536
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:554
+#: nova/virt/vmwareapi/vmops.py:548
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:573
+#: nova/virt/vmwareapi/vmops.py:567
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:578
+#: nova/virt/vmwareapi/vmops.py:572
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:582
+#: nova/virt/vmwareapi/vmops.py:576
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:585
+#: nova/virt/vmwareapi/vmops.py:579
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:587
+#: nova/virt/vmwareapi/vmops.py:581
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:599
+#: nova/virt/vmwareapi/vmops.py:593
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:609
+#: nova/virt/vmwareapi/vmops.py:603
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:614
+#: nova/virt/vmwareapi/vmops.py:608
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:623
+#: nova/virt/vmwareapi/vmops.py:617
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:627
+#: nova/virt/vmwareapi/vmops.py:621
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:641
+#: nova/virt/vmwareapi/vmops.py:635
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:645
+#: nova/virt/vmwareapi/vmops.py:639
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:648
+#: nova/virt/vmwareapi/vmops.py:642
msgid "instance is powered off and can not be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:651
+#: nova/virt/vmwareapi/vmops.py:645
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:664
+#: nova/virt/vmwareapi/vmops.py:658
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:669
+#: nova/virt/vmwareapi/vmops.py:663
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:671
+#: nova/virt/vmwareapi/vmops.py:665
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:707
+#: nova/virt/vmwareapi/vmops.py:701
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:765
+#: nova/virt/vmwareapi/vmops.py:759
#, python-format
msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:773
+#: nova/virt/vmwareapi/vmops.py:767
#, python-format
msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:810
+#: nova/virt/vmwareapi/vmops.py:804
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:814
+#: nova/virt/vmwareapi/vmops.py:808
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -7339,194 +7940,204 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:85 nova/virt/xenapi/vmops.py:1491
+#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1476
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1495
+#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1480
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:94 nova/virt/xenapi/vmops.py:1500
+#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1485
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:104
+#: nova/virt/xenapi/agent.py:103
#, python-format
msgid ""
"The agent call to %(method)s returned an invalid response: %(ret)r. "
"path=%(path)s; args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:114
+#: nova/virt/xenapi/agent.py:113
#, python-format
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:132
+#: nova/virt/xenapi/agent.py:131
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:146
+#: nova/virt/xenapi/agent.py:145
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:154
+#: nova/virt/xenapi/agent.py:153
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:162
+#: nova/virt/xenapi/agent.py:161
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:176
+#: nova/virt/xenapi/agent.py:175
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:187
+#: nova/virt/xenapi/agent.py:186
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:207
+#: nova/virt/xenapi/agent.py:206
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:214
+#: nova/virt/xenapi/agent.py:213
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:227
+#: nova/virt/xenapi/agent.py:226
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:234
+#: nova/virt/xenapi/agent.py:233
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:240
+#: nova/virt/xenapi/agent.py:239
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:263
+#: nova/virt/xenapi/agent.py:262
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not bemanipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:271
+#: nova/virt/xenapi/agent.py:270
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:275
+#: nova/virt/xenapi/agent.py:274
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:327
+#: nova/virt/xenapi/agent.py:326
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:135
+#: nova/virt/xenapi/driver.py:133
msgid ""
"Must specify xenapi_connection_url, xenapi_connection_username "
"(optionally), and xenapi_connection_password to use "
"compute_driver=xenapi.XenAPIDriver"
msgstr ""
-#: nova/virt/xenapi/driver.py:162
+#: nova/virt/xenapi/driver.py:160
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:360
+#: nova/virt/xenapi/driver.py:344
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:571
+#: nova/virt/xenapi/driver.py:556
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:623
+#: nova/virt/xenapi/driver.py:616
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:661
+#: nova/virt/xenapi/driver.py:656
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:745 nova/virt/xenapi/driver.py:759
+#: nova/virt/xenapi/driver.py:740 nova/virt/xenapi/driver.py:754
#, python-format
msgid "Got exception: %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:670 nova/virt/xenapi/fake.py:772
-#: nova/virt/xenapi/fake.py:791 nova/virt/xenapi/fake.py:859
+#: nova/virt/xenapi/fake.py:680 nova/virt/xenapi/fake.py:784
+#: nova/virt/xenapi/fake.py:803 nova/virt/xenapi/fake.py:871
msgid "Raising NotImplemented"
msgstr ""
-#: nova/virt/xenapi/fake.py:672
+#: nova/virt/xenapi/fake.py:682
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:706
+#: nova/virt/xenapi/fake.py:716
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr ""
-#: nova/virt/xenapi/fake.py:711
+#: nova/virt/xenapi/fake.py:721
#, python-format
msgid "Calling getter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:714
+#: nova/virt/xenapi/fake.py:724
#, python-format
msgid "Calling setter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:774
+#: nova/virt/xenapi/fake.py:786
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
"with the wrong number of arguments"
msgstr ""
-#: nova/virt/xenapi/host.py:71
+#: nova/virt/xenapi/host.py:69
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
msgstr ""
+#: nova/virt/xenapi/host.py:81
+#, python-format
+msgid "Aggregate for host %(host)s count not be found."
+msgstr ""
+
+#: nova/virt/xenapi/host.py:102
+#, python-format
+msgid "Unable to migrate VM %(vm_ref)sfrom %(host)s"
+msgstr ""
+
#: nova/virt/xenapi/host.py:157
#, python-format
msgid "Unable to get SR for this host: %s"
msgstr ""
-#: nova/virt/xenapi/host.py:191
+#: nova/virt/xenapi/host.py:192
#, python-format
msgid "Failed to extract instance support from %s"
msgstr ""
-#: nova/virt/xenapi/host.py:208
+#: nova/virt/xenapi/host.py:209
msgid "Unable to get updated status"
msgstr ""
-#: nova/virt/xenapi/host.py:211
+#: nova/virt/xenapi/host.py:212
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr ""
@@ -7546,651 +8157,678 @@ msgstr ""
msgid "Found no network for bridge %s"
msgstr ""
-#: nova/virt/xenapi/pool.py:75
+#: nova/virt/xenapi/pool.py:70
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
"%(host)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:166
+#: nova/virt/xenapi/pool.py:157
#, python-format
msgid "Unable to eject %(host)s from the pool; pool not empty"
msgstr ""
-#: nova/virt/xenapi/pool.py:182
+#: nova/virt/xenapi/pool.py:174
#, python-format
msgid "Unable to eject %(host)s from the pool; No master found"
msgstr ""
-#: nova/virt/xenapi/pool.py:199
+#: nova/virt/xenapi/pool.py:191
#, python-format
msgid "Pool-Join failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:202
+#: nova/virt/xenapi/pool.py:194
#, python-format
msgid "Unable to join %(host)s in the pool"
msgstr ""
-#: nova/virt/xenapi/pool.py:218
+#: nova/virt/xenapi/pool.py:210
#, python-format
msgid "Pool-eject failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:230
+#: nova/virt/xenapi/pool.py:222
#, python-format
msgid "Unable to set up pool: %(e)s."
msgstr ""
-#: nova/virt/xenapi/pool.py:241
+#: nova/virt/xenapi/pool.py:233
#, python-format
msgid "Pool-set_name_label failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/vif.py:104
+#: nova/virt/xenapi/vif.py:102
#, python-format
msgid "Found no PIF for device %s"
msgstr ""
-#: nova/virt/xenapi/vif.py:123
+#: nova/virt/xenapi/vif.py:121
#, python-format
msgid ""
"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
"Expected %(vlan_num)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:257
+#: nova/virt/xenapi/vm_utils.py:271
msgid "Created VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:269
+#: nova/virt/xenapi/vm_utils.py:283
msgid "VM destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:276
+#: nova/virt/xenapi/vm_utils.py:288 nova/virt/xenapi/vm_utils.py:303
msgid "VM already halted, skipping shutdown..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:280
-msgid "Shutting down VM"
+#: nova/virt/xenapi/vm_utils.py:292
+msgid "Shutting down VM (cleanly)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:312
+#: nova/virt/xenapi/vm_utils.py:307
+msgid "Shutting down VM (hard)"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:346
#, python-format
msgid "VBD not found in instance %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:329
+#: nova/virt/xenapi/vm_utils.py:363
#, python-format
msgid "VBD %s already detached"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:332
+#: nova/virt/xenapi/vm_utils.py:366
#, python-format
msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:337
+#: nova/virt/xenapi/vm_utils.py:371
#, python-format
msgid "Unable to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:342
+#: nova/virt/xenapi/vm_utils.py:376
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:353
+#: nova/virt/xenapi/vm_utils.py:387
#, python-format
msgid "Unable to destroy VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:372
+#: nova/virt/xenapi/vm_utils.py:406
#, python-format
msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:375
+#: nova/virt/xenapi/vm_utils.py:409
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:391
+#: nova/virt/xenapi/vm_utils.py:425
#, python-format
msgid "Unable to destroy VDI %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:423
+#: nova/virt/xenapi/vm_utils.py:457
#, python-format
msgid ""
"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
" on %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:454
+#: nova/virt/xenapi/vm_utils.py:472
msgid "SR not present and could not be introduced"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:555
+#: nova/virt/xenapi/vm_utils.py:582
#, python-format
msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:575
+#: nova/virt/xenapi/vm_utils.py:602
#, python-format
msgid "No primary VDI found for %(vm_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:584
+#: nova/virt/xenapi/vm_utils.py:611
msgid "Starting snapshot for VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:632
+#: nova/virt/xenapi/vm_utils.py:659
#, python-format
msgid "Destroying cached VDI '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:690
+#: nova/virt/xenapi/vm_utils.py:717
#, python-format
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:876
+#: nova/virt/xenapi/vm_utils.py:903
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:934
+#: nova/virt/xenapi/vm_utils.py:959
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:968
+#: nova/virt/xenapi/vm_utils.py:993
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:980
+#: nova/virt/xenapi/vm_utils.py:1005
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:993
+#: nova/virt/xenapi/vm_utils.py:1018
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1029
+#: nova/virt/xenapi/vm_utils.py:1052
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1040
+#: nova/virt/xenapi/vm_utils.py:1063
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1104
+#: nova/virt/xenapi/vm_utils.py:1127
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1120
+#: nova/virt/xenapi/vm_utils.py:1142
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1124
+#: nova/virt/xenapi/vm_utils.py:1146
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1146
+#: nova/virt/xenapi/vm_utils.py:1168
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1159
+#: nova/virt/xenapi/vm_utils.py:1181
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1168
+#: nova/virt/xenapi/vm_utils.py:1190
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1187
+#: nova/virt/xenapi/vm_utils.py:1209
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1201
+#: nova/virt/xenapi/vm_utils.py:1223
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1210
+#: nova/virt/xenapi/vm_utils.py:1232
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1248
+#: nova/virt/xenapi/vm_utils.py:1273
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1269
+#: nova/virt/xenapi/vm_utils.py:1304
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1287
+#: nova/virt/xenapi/vm_utils.py:1322
+#, python-format
+msgid ""
+"Image format is None: trying to determine PV status using pygrub; if "
+"instance with vdi %s does not boot correctly, try with image metadata."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1328
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1318
+#: nova/virt/xenapi/vm_utils.py:1359
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1414
+#: nova/virt/xenapi/vm_utils.py:1455
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1441
+#: nova/virt/xenapi/vm_utils.py:1482
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1469
+#: nova/virt/xenapi/vm_utils.py:1510
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1487
+#: nova/virt/xenapi/vm_utils.py:1528
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1500
+#: nova/virt/xenapi/vm_utils.py:1541
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1508
+#: nova/virt/xenapi/vm_utils.py:1549
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1510
+#: nova/virt/xenapi/vm_utils.py:1551
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1513
+#: nova/virt/xenapi/vm_utils.py:1554
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1516
+#: nova/virt/xenapi/vm_utils.py:1557
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1520
+#: nova/virt/xenapi/vm_utils.py:1561
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1522
+#: nova/virt/xenapi/vm_utils.py:1563
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1525
+#: nova/virt/xenapi/vm_utils.py:1566
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1528
+#: nova/virt/xenapi/vm_utils.py:1569
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1531
+#: nova/virt/xenapi/vm_utils.py:1572
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1553
+#: nova/virt/xenapi/vm_utils.py:1594
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1569
+#: nova/virt/xenapi/vm_utils.py:1610
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1623
+#: nova/virt/xenapi/vm_utils.py:1664
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1683
+#: nova/virt/xenapi/vm_utils.py:1724
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1770
+#: nova/virt/xenapi/vm_utils.py:1811
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1780
+#: nova/virt/xenapi/vm_utils.py:1821
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1815
+#: nova/virt/xenapi/vm_utils.py:1856
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1835
+#: nova/virt/xenapi/vm_utils.py:1876
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1848
+#: nova/virt/xenapi/vm_utils.py:1889
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1851
+#: nova/virt/xenapi/vm_utils.py:1892
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1853
+#: nova/virt/xenapi/vm_utils.py:1894
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1856
+#: nova/virt/xenapi/vm_utils.py:1897
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1861
+#: nova/virt/xenapi/vm_utils.py:1902
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1869
+#: nova/virt/xenapi/vm_utils.py:1910
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1882
+#: nova/virt/xenapi/vm_utils.py:1923
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1889
+#: nova/virt/xenapi/vm_utils.py:1931
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1891
+#: nova/virt/xenapi/vm_utils.py:1933
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1904
+#: nova/virt/xenapi/vm_utils.py:1935
+msgid ""
+"Error while executing pygrub! Please, ensure the binary is installed "
+"correctly, and available in your PATH; on some Linux distros, pygrub may "
+"be installed in /usr/lib/xen-X.Y/bin/pygrub. Attempting to boot in HVM "
+"mode."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1952
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1910
+#: nova/virt/xenapi/vm_utils.py:1958
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1935
+#: nova/virt/xenapi/vm_utils.py:1983
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1948
+#: nova/virt/xenapi/vm_utils.py:1996
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2002
+#: nova/virt/xenapi/vm_utils.py:2050
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2034
+#: nova/virt/xenapi/vm_utils.py:2082
#, python-format
msgid ""
"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
"reduction in size"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2083
+#: nova/virt/xenapi/vm_utils.py:2134
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2092
+#: nova/virt/xenapi/vm_utils.py:2143
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2204
+#: nova/virt/xenapi/vm_utils.py:2255
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:128 nova/virt/xenapi/vmops.py:674
+#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:692
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:169
+#: nova/virt/xenapi/vmops.py:168
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:237
+#: nova/virt/xenapi/vmops.py:254
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:305
+#: nova/virt/xenapi/vmops.py:323
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:377
+#: nova/virt/xenapi/vmops.py:395
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:408
+#: nova/virt/xenapi/vmops.py:426
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:481
+#: nova/virt/xenapi/vmops.py:499
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:498
+#: nova/virt/xenapi/vmops.py:516
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:524
+#: nova/virt/xenapi/vmops.py:542
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:530
+#: nova/virt/xenapi/vmops.py:548
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:544
+#: nova/virt/xenapi/vmops.py:562
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:547
+#: nova/virt/xenapi/vmops.py:565
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:558
+#: nova/virt/xenapi/vmops.py:576
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:585
+#: nova/virt/xenapi/vmops.py:603
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:593
+#: nova/virt/xenapi/vmops.py:611
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:643
+#: nova/virt/xenapi/vmops.py:661
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:647
+#: nova/virt/xenapi/vmops.py:665
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:655
+#: nova/virt/xenapi/vmops.py:673
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:692
+#: nova/virt/xenapi/vmops.py:710
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:816
+#: nova/virt/xenapi/vmops.py:716 nova/virt/xenapi/vmops.py:766
+msgid "Clean shutdown did not complete successfully, trying hard shutdown."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:795
+msgid "Resize down not allowed without auto_disk_config"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:840
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:821
+#: nova/virt/xenapi/vmops.py:845
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:865
+#: nova/virt/xenapi/vmops.py:889
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:956
+#: nova/virt/xenapi/vmops.py:980
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:982
+#: nova/virt/xenapi/vmops.py:1006
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1009
+#: nova/virt/xenapi/vmops.py:1033
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1016
+#: nova/virt/xenapi/vmops.py:1040
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1023
+#: nova/virt/xenapi/vmops.py:1047
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1049
+#: nova/virt/xenapi/vmops.py:1073
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1075
+#: nova/virt/xenapi/vmops.py:1099
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1126
+#: nova/virt/xenapi/vmops.py:1150
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1160
+#: nova/virt/xenapi/vmops.py:1184
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1209
+#: nova/virt/xenapi/vmops.py:1233
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1213
+#: nova/virt/xenapi/vmops.py:1237
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1312
+#: nova/virt/xenapi/vmops.py:1297
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1395
+#: nova/virt/xenapi/vmops.py:1380
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1414
+#: nova/virt/xenapi/vmops.py:1399
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1423
+#: nova/virt/xenapi/vmops.py:1408
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1426
+#: nova/virt/xenapi/vmops.py:1411
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1454
+#: nova/virt/xenapi/vmops.py:1439
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1550
+#: nova/virt/xenapi/vmops.py:1535
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1582
+#: nova/virt/xenapi/vmops.py:1567
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1630
+#: nova/virt/xenapi/vmops.py:1615
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1666
+#: nova/virt/xenapi/vmops.py:1651
msgid "Migrate Send failed"
msgstr ""
@@ -8198,12 +8836,12 @@ msgstr ""
msgid "creating sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:44 nova/virt/xenapi/volume_utils.py:72
+#: nova/virt/xenapi/volume_utils.py:44 nova/virt/xenapi/volume_utils.py:73
#, python-format
msgid "type is = %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:47 nova/virt/xenapi/volume_utils.py:75
+#: nova/virt/xenapi/volume_utils.py:47 nova/virt/xenapi/volume_utils.py:76
#, python-format
msgid "name = %s"
msgstr ""
@@ -8213,7 +8851,7 @@ msgstr ""
msgid "Created %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:65 nova/virt/xenapi/volume_utils.py:163
+#: nova/virt/xenapi/volume_utils.py:65 nova/virt/xenapi/volume_utils.py:164
msgid "Unable to create Storage Repository"
msgstr ""
@@ -8221,93 +8859,93 @@ msgstr ""
msgid "introducing sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:92 nova/virt/xenapi/volume_utils.py:159
-#: nova/virt/xenapi/volumeops.py:150
+#: nova/virt/xenapi/volume_utils.py:93 nova/virt/xenapi/volume_utils.py:160
+#: nova/virt/xenapi/volumeops.py:141
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:95
+#: nova/virt/xenapi/volume_utils.py:96
msgid "Creating pbd for SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:97
+#: nova/virt/xenapi/volume_utils.py:98
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:105 nova/virt/xenapi/volumeops.py:154
+#: nova/virt/xenapi/volume_utils.py:106 nova/virt/xenapi/volumeops.py:145
msgid "Unable to introduce Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:116 nova/virt/xenapi/volumeops.py:46
+#: nova/virt/xenapi/volume_utils.py:117 nova/virt/xenapi/volumeops.py:46
msgid "Unable to get SR using uuid"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:118
+#: nova/virt/xenapi/volume_utils.py:119
#, python-format
msgid "Forgetting SR %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:126
+#: nova/virt/xenapi/volume_utils.py:127
msgid "Unable to forget Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:146
+#: nova/virt/xenapi/volume_utils.py:147
#, python-format
msgid "Introducing %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:175
+#: nova/virt/xenapi/volume_utils.py:176
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:193
+#: nova/virt/xenapi/volume_utils.py:194
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:199
+#: nova/virt/xenapi/volume_utils.py:200
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:223
+#: nova/virt/xenapi/volume_utils.py:224
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:231
+#: nova/virt/xenapi/volume_utils.py:232
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:253
+#: nova/virt/xenapi/volume_utils.py:254
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:263
+#: nova/virt/xenapi/volume_utils.py:264
#, python-format
msgid "Error finding vdis in SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:270
+#: nova/virt/xenapi/volume_utils.py:271
#, python-format
msgid "Unable to find vbd for vdi %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:281
+#: nova/virt/xenapi/volume_utils.py:282
#, python-format
msgid "Unable to obtain target information %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:309
+#: nova/virt/xenapi/volume_utils.py:321
#, python-format
msgid "Unable to obtain target information %(connection_data)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:335
+#: nova/virt/xenapi/volume_utils.py:347
#, python-format
msgid "Mountpoint cannot be translated: %s"
msgstr ""
@@ -8356,214 +8994,88 @@ msgstr ""
msgid "Could not forget SR"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:172
+#: nova/virt/xenapi/volumeops.py:128
#, python-format
-msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
+msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:182
+#: nova/virt/xenapi/volumeops.py:163
#, python-format
-msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
+msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:193
+#: nova/virt/xenapi/volumeops.py:173
#, python-format
-msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
+msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:202
+#: nova/virt/xenapi/volumeops.py:191
#, python-format
msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:210
+#: nova/virt/xenapi/volumeops.py:199
#, python-format
msgid "Unable to locate volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:219
+#: nova/virt/xenapi/volumeops.py:208
#, python-format
msgid "Unable to detach volume %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:224
+#: nova/virt/xenapi/volumeops.py:213
#, python-format
msgid "Unable to destroy vbd %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:231
+#: nova/virt/xenapi/volumeops.py:220
#, python-format
msgid "Error purging SR %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:233
+#: nova/virt/xenapi/volumeops.py:222
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:96 nova/vnc/xvp_proxy.py:101
+#: nova/vnc/xvp_proxy.py:95 nova/vnc/xvp_proxy.py:100
#, python-format
msgid "Error in handshake: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:117
+#: nova/vnc/xvp_proxy.py:116
#, python-format
msgid "Invalid request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:137
+#: nova/vnc/xvp_proxy.py:136
#, python-format
msgid "Request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:140
+#: nova/vnc/xvp_proxy.py:139
#, python-format
msgid "Request made with missing token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:150
+#: nova/vnc/xvp_proxy.py:149
#, python-format
msgid "Request made with invalid token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:157
+#: nova/vnc/xvp_proxy.py:156
#, python-format
msgid "Unexpected error: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:177
+#: nova/vnc/xvp_proxy.py:176
#, python-format
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/cinder.py:68
+#: nova/volume/cinder.py:70
#, python-format
msgid "Cinderclient connection created using URL: %s"
msgstr ""
-#: nova/volume/driver.py:103
-#, python-format
-msgid "Recovering from a failed execute. Try number %s"
-msgstr ""
-
-#: nova/volume/driver.py:113
-#, python-format
-msgid "volume group %s doesn't exist"
-msgstr ""
-
-#: nova/volume/driver.py:324
-#, python-format
-msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
-msgstr ""
-
-#: nova/volume/driver.py:397
-#, python-format
-msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
-msgstr ""
-
-#: nova/volume/driver.py:414
-#, python-format
-msgid ""
-"Skipping remove_export. No iscsi_target is presently exported for volume:"
-" %s"
-msgstr ""
-
-#: nova/volume/driver.py:423
-msgid "ISCSI provider_location not stored, using discovery"
-msgstr ""
-
-#: nova/volume/driver.py:470
-#, python-format
-msgid "Could not find iSCSI export for volume %s"
-msgstr ""
-
-#: nova/volume/driver.py:474
-#, python-format
-msgid "ISCSI Discovery: Found %s"
-msgstr ""
-
-#: nova/volume/driver.py:565
-#, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
-msgstr ""
-
-#: nova/volume/driver.py:606
-#, python-format
-msgid "FAKE ISCSI: %s"
-msgstr ""
-
-#: nova/volume/driver.py:618
-#, python-format
-msgid "rbd has no pool %s"
-msgstr ""
-
-#: nova/volume/driver.py:740
-#, python-format
-msgid "Image %s is not stored in rbd"
-msgstr ""
-
-#: nova/volume/driver.py:744
-#, python-format
-msgid "Image %s has blank components"
-msgstr ""
-
-#: nova/volume/driver.py:747
-#, python-format
-msgid "Image %s is not an rbd snapshot"
-msgstr ""
-
-#: nova/volume/driver.py:762
-#, python-format
-msgid "%s is in a different ceph cluster"
-msgstr ""
-
-#: nova/volume/driver.py:773
-#, python-format
-msgid "Unable to read image %s"
-msgstr ""
-
-#: nova/volume/driver.py:815
-#, python-format
-msgid "Sheepdog is not working: %s"
-msgstr ""
-
-#: nova/volume/driver.py:820
-msgid "Sheepdog is not working"
-msgstr ""
-
-#: nova/volume/driver.py:924 nova/volume/driver.py:929
-#, python-format
-msgid "LoggingVolumeDriver: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:122
-#, python-format
-msgid "Creating volume: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:136
-#, python-format
-msgid "Failed to create iscsi target for volume id:%(vol_id)s."
-msgstr ""
-
-#: nova/volume/iscsi.py:146
-#, python-format
-msgid ""
-"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
-"your tgtd config file contains 'include %(volumes_dir)s/*'"
-msgstr ""
-
-#: nova/volume/iscsi.py:154 nova/volume/iscsi.py:196
-#, python-format
-msgid "Removing volume: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:168
-#, python-format
-msgid "Failed to create iscsi target for volume id:%(volume_id)s."
-msgstr ""
-
-#: nova/volume/iscsi.py:177
-msgid "valid iqn needed for show_target"
-msgstr ""
-
diff --git a/nova/manager.py b/nova/manager.py
index 22a42d2d3..7df63f719 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -54,34 +54,61 @@ This module provides Manager, a base class for managers.
"""
import eventlet
+import time
-from nova import config
from nova.db import base
-from nova import flags
+from nova import exception
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import version
-CONF = config.CONF
+
+periodic_opts = [
+ cfg.BoolOpt('run_external_periodic_tasks',
+ default=True,
+ help=('Some periodic tasks can be run in a separate process. '
+ 'Should we run them here?')),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(periodic_opts)
+CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
+DEFAULT_INTERVAL = 60.0
+
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
- 1. Without arguments '@periodic_task', this will be run on every tick
+ 1. Without arguments '@periodic_task', this will be run on every cycle
of the periodic scheduler.
- 2. With arguments, @periodic_task(ticks_between_runs=N), this will be
- run on every N ticks of the periodic scheduler.
+ 2. With arguments, @periodic_task(periodic_spacing=N), this will be
+ run on approximately every N seconds. If this number is negative the
+ periodic task will be disabled.
"""
def decorator(f):
+ # Test for old style invocation
+ if 'ticks_between_runs' in kwargs:
+ raise exception.InvalidPeriodicTaskArg(arg='ticks_between_runs')
+
+ # Control if run at all
f._periodic_task = True
- f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
+ f._periodic_external_ok = kwargs.pop('external_process_ok', False)
+ if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
+ f._periodic_enabled = False
+ else:
+ f._periodic_enabled = kwargs.pop('enabled', True)
+
+ # Control frequency
+ f._periodic_spacing = kwargs.pop('spacing', 0)
+ f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
@@ -117,16 +144,39 @@ class ManagerMeta(type):
cls._periodic_tasks = []
try:
- cls._ticks_to_skip = cls._ticks_to_skip.copy()
+ cls._periodic_last_run = cls._periodic_last_run.copy()
except AttributeError:
- cls._ticks_to_skip = {}
+ cls._periodic_last_run = {}
+
+ try:
+ cls._periodic_spacing = cls._periodic_spacing.copy()
+ except AttributeError:
+ cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
+
+ if task._periodic_spacing < 0:
+ LOG.info(_('Skipping periodic task %(task)s because '
+ 'its interval is negative'),
+ {'task': name})
+ continue
+ if not task._periodic_enabled:
+ LOG.info(_('Skipping periodic task %(task)s because '
+ 'it is disabled'),
+ {'task': name})
+ continue
+
+ # A periodic spacing of zero indicates that this task should
+ # be run every pass
+ if task._periodic_spacing == 0:
+ task._periodic_spacing = None
+
cls._periodic_tasks.append((name, task))
- cls._ticks_to_skip[name] = task._ticks_between_runs
+ cls._periodic_spacing[name] = task._periodic_spacing
+ cls._periodic_last_run[name] = task._periodic_last_run
class Manager(base.Base):
@@ -140,6 +190,7 @@ class Manager(base.Base):
host = CONF.host
self.host = host
self.load_plugins()
+ self.backdoor_port = None
super(Manager, self).__init__(db_driver)
def load_plugins(self):
@@ -156,30 +207,40 @@ class Manager(base.Base):
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
+ idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
- ticks_to_skip = self._ticks_to_skip[task_name]
- if ticks_to_skip > 0:
- LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
- " ticks left until next run"), locals())
- self._ticks_to_skip[task_name] -= 1
- continue
+ # If a periodic task is _nearly_ due, then we'll run it early
+ if self._periodic_spacing[task_name] is None:
+ wait = 0
+ else:
+ due = (self._periodic_last_run[task_name] +
+ self._periodic_spacing[task_name])
+ wait = max(0, due - time.time())
+ if wait > 0.2:
+ if wait < idle_for:
+ idle_for = wait
+ continue
- self._ticks_to_skip[task_name] = task._ticks_between_runs
LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
+ self._periodic_last_run[task_name] = time.time()
try:
task(self, context)
- # NOTE(tiantian): After finished a task, allow manager to
- # do other work (report_state, processing AMPQ request etc.)
- eventlet.sleep(0)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
locals())
+ if (not self._periodic_spacing[task_name] is None and
+ self._periodic_spacing[task_name] < idle_for):
+ idle_for = self._periodic_spacing[task_name]
+ eventlet.sleep(0)
+
+ return idle_for
+
def init_host(self):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
@@ -189,7 +250,7 @@ class Manager(base.Base):
"""
pass
- def pre_start_hook(self):
+ def pre_start_hook(self, **kwargs):
"""Hook to provide the manager the ability to do additional
start-up work before any RPC queues/consumers are created. This is
called after other initialization has succeeded and a service
@@ -240,6 +301,8 @@ class SchedulerDependentManager(Manager):
def update_service_capabilities(self, capabilities):
"""Remember these capabilities to send on next periodic update."""
+ if not isinstance(capabilities, list):
+ capabilities = [capabilities]
self.last_capabilities = capabilities
@periodic_task
diff --git a/nova/netconf.py b/nova/netconf.py
new file mode 100644
index 000000000..531a9e200
--- /dev/null
+++ b/nova/netconf.py
@@ -0,0 +1,62 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+
+from nova.openstack.common import cfg
+
+CONF = cfg.CONF
+
+
+def _get_my_ip():
+ """
+ Returns the actual ip of the local machine.
+
+ This code figures out what source address would be used if some traffic
+ were to be sent out to some well known address on the Internet. In this
+ case, a Google DNS server is used, but the specific address does not
+ matter much. No traffic is actually sent.
+ """
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.error:
+ return "127.0.0.1"
+
+
+netconf_opts = [
+ cfg.StrOpt('my_ip',
+ default=_get_my_ip(),
+ help='ip address of this host'),
+ cfg.StrOpt('host',
+ default=socket.getfqdn(),
+ help='Name of this node. This can be an opaque identifier. '
+ 'It is not necessarily a hostname, FQDN, or IP address. '
+ 'However, the node name must be valid within '
+ 'an AMQP key, and if using ZeroMQ, a valid '
+ 'hostname, FQDN, or IP address'),
+ cfg.BoolOpt('use_ipv6',
+ default=False,
+ help='use ipv6'),
+]
+
+CONF.register_opts(netconf_opts)
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index 221130a91..f0eeb4e84 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -18,12 +18,21 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.network import <foo>' elsewhere.
-import nova.config
-import nova.flags
+import nova.openstack.common.cfg
import nova.openstack.common.importutils
+_network_opts = [
+ nova.openstack.common.cfg.StrOpt('network_api_class',
+ default='nova.network.api.API',
+ help='The full class name of the '
+ 'network API class to use'),
+]
+
+nova.openstack.common.cfg.CONF.register_opts(_network_opts)
+
def API():
importutils = nova.openstack.common.importutils
- cls = importutils.import_class(nova.config.CONF.network_api_class)
+ network_api_class = nova.openstack.common.cfg.CONF.network_api_class
+ cls = importutils.import_class(network_api_class)
return cls()
diff --git a/nova/network/api.py b/nova/network/api.py
index e4fd38b13..976be93ed 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -22,11 +22,9 @@ import inspect
from nova.db import base
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
LOG = logging.getLogger(__name__)
@@ -52,11 +50,8 @@ def refresh_cache(f):
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
- # get nw_info from return if possible, otherwise call for it
- nw_info = res if isinstance(res, network_model.NetworkInfo) else None
-
- update_instance_cache_with_nw_info(self, context, instance, nw_info,
- *args, **kwargs)
+ update_instance_cache_with_nw_info(self, context, instance,
+ nw_info=res)
# return the original function's return value
return res
@@ -64,24 +59,28 @@ def refresh_cache(f):
def update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None,
- *args,
- **kwargs):
+ nw_info=None):
try:
- nw_info = nw_info or api._get_instance_nw_info(context, instance)
-
+ if not isinstance(nw_info, network_model.NetworkInfo):
+ nw_info = None
+ if not nw_info:
+ nw_info = api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
api.db.instance_info_cache_update(context, instance['uuid'], cache)
- except Exception as e:
- LOG.exception('Failed storing info cache', instance=instance)
- LOG.debug(_('args: %s') % (args or {}))
- LOG.debug(_('kwargs: %s') % (kwargs or {}))
+ except Exception:
+ LOG.exception(_('Failed storing info cache'), instance=instance)
class API(base.Base):
- """API for interacting with the network manager."""
+ """API for doing networking via the nova-network network manager.
+
+ This is a pluggable module - other implementations do networking via
+ other services (such as Quantum).
+ """
+
+ _sentinel = object()
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
@@ -124,6 +123,9 @@ class API(base.Base):
return self.network_rpcapi.get_floating_ips_by_fixed_address(context,
fixed_address)
+ def get_backdoor_port(self, context, host):
+ return self.network_rpcapi.get_backdoor_port(context, host)
+
def get_instance_id_by_floating_address(self, context, address):
# NOTE(tr3buchet): i hate this
return self.network_rpcapi.get_instance_id_by_floating_address(context,
@@ -137,7 +139,7 @@ class API(base.Base):
return self.network_rpcapi.get_vif_by_mac_address(context, mac_address)
def allocate_floating_ip(self, context, pool=None):
- """Adds a floating ip to a project from a pool. (allocates)"""
+ """Adds (allocates) a floating ip to a project from a pool."""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -147,7 +149,7 @@ class API(base.Base):
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
- """Removes floating ip with address from a project. (deallocates)"""
+ """Removes (deallocates) a floating ip with address from a project."""
return self.network_rpcapi.deallocate_floating_ip(context, address,
affect_auto_assigned)
@@ -182,9 +184,15 @@ class API(base.Base):
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
- requested_networks):
+ requested_networks, macs=None):
"""Allocates all network structures for an instance.
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: macs is ignored by nova-network.
:returns: network info as from get_instance_nw_info() below
"""
args = {}
@@ -208,13 +216,15 @@ class API(base.Base):
args['host'] = instance['host']
self.network_rpcapi.deallocate_for_instance(context, **args)
+ @refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed ip to instance from specified network."""
- args = {'instance_id': instance['id'],
+ args = {'instance_id': instance['uuid'],
'host': instance['host'],
'network_id': network_id}
self.network_rpcapi.add_fixed_ip_to_instance(context, **args)
+ @refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed ip from instance from specified network."""
@@ -228,10 +238,23 @@ class API(base.Base):
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
- @refresh_cache
- def get_instance_nw_info(self, context, instance):
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ """Associate or disassociate host or project to network."""
+ associations = {}
+ if host is not API._sentinel:
+ associations['host'] = host
+ if project is not API._sentinel:
+ associations['project'] = project
+ self.network_rpcapi.associate(context, network_uuid, associations)
+
+ def get_instance_nw_info(self, context, instance, update_cache=True):
"""Returns all network info related to an instance."""
- return self._get_instance_nw_info(context, instance)
+ result = self._get_instance_nw_info(context, instance)
+ if update_cache:
+ update_instance_cache_with_nw_info(self, context, instance,
+ result)
+ return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
@@ -265,7 +288,7 @@ class API(base.Base):
return self.network_rpcapi.get_dns_domains(context)
def add_dns_entry(self, context, address, name, dns_type, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
@@ -273,7 +296,7 @@ class API(base.Base):
return self.network_rpcapi.add_dns_entry(context, **args)
def modify_dns_entry(self, context, name, address, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
@@ -289,12 +312,12 @@ class API(base.Base):
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
def get_dns_entries_by_address(self, context, address, domain):
- """Get entries for address and domain"""
+ """Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
def get_dns_entries_by_name(self, context, name, domain):
- """Get entries for name and domain"""
+ """Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@@ -338,7 +361,7 @@ class API(base.Base):
return [floating_ip['address'] for floating_ip in floating_ips]
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
@@ -351,12 +374,12 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
- args['host'] = migration['dest_compute']
+ args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
diff --git a/nova/network/dns_driver.py b/nova/network/dns_driver.py
index cd2001b64..07b690b91 100644
--- a/nova/network/dns_driver.py
+++ b/nova/network/dns_driver.py
@@ -14,31 +14,31 @@
class DNSDriver(object):
- """ Defines the DNS manager interface. Does nothing. """
+ """Defines the DNS manager interface. Does nothing."""
def __init__(self):
pass
def get_domains(self):
- return []
+ raise NotImplementedError()
def create_entry(self, _name, _address, _type, _domain):
- pass
+ raise NotImplementedError()
def delete_entry(self, _name, _domain):
- pass
+ raise NotImplementedError()
def modify_address(self, _name, _address, _domain):
- pass
+ raise NotImplementedError()
def get_entries_by_address(self, _address, _domain):
- return []
+ raise NotImplementedError()
def get_entries_by_name(self, _name, _domain):
- return []
+ raise NotImplementedError()
def create_domain(self, _fqdomain):
- pass
+ raise NotImplementedError()
def delete_domain(self, _fqdomain):
- pass
+ raise NotImplementedError()
diff --git a/nova/network/driver.py b/nova/network/driver.py
new file mode 100644
index 000000000..2a9218898
--- /dev/null
+++ b/nova/network/driver.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+
+driver_opts = [
+ cfg.StrOpt('network_driver',
+ default='nova.network.linux_net',
+ help='Driver to use for network creation'),
+]
+CONF = cfg.CONF
+CONF.register_opts(driver_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+def load_network_driver(network_driver=None):
+ if not network_driver:
+ network_driver = CONF.network_driver
+
+ if not network_driver:
+ LOG.error(_("Network driver option required, but not specified"))
+ sys.exit(1)
+
+ LOG.info(_("Loading network driver '%s'") % network_driver)
+
+ return importutils.import_module(network_driver)
diff --git a/nova/network/l3.py b/nova/network/l3.py
index 6c16dbeb6..baf77c112 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
from nova.network import linux_net
from nova.openstack.common import log as logging
from nova import utils
@@ -24,29 +23,29 @@ LOG = logging.getLogger(__name__)
class L3Driver(object):
- """Abstract class that defines a generic L3 API"""
+ """Abstract class that defines a generic L3 API."""
def __init__(self, l3_lib=None):
raise NotImplementedError()
def initialize(self, **kwargs):
- """Set up basic L3 networking functionality"""
+ """Set up basic L3 networking functionality."""
raise NotImplementedError()
def initialize_network(self, network):
- """Enable rules for a specific network"""
+ """Enable rules for a specific network."""
raise NotImplementedError()
def initialize_gateway(self, network):
- """Set up a gateway on this network"""
+ """Set up a gateway on this network."""
raise NotImplementedError()
def remove_gateway(self, network_ref):
- """Remove an existing gateway on this network"""
+ """Remove an existing gateway on this network."""
raise NotImplementedError()
def is_initialized(self):
- """:returns: True/False (whether the driver is initialized)"""
+ """:returns: True/False (whether the driver is initialized)."""
raise NotImplementedError()
def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
@@ -69,7 +68,7 @@ class L3Driver(object):
class LinuxNetL3(L3Driver):
- """L3 driver that uses linux_net as the backend"""
+ """L3 driver that uses linux_net as the backend."""
def __init__(self):
self.initialized = False
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index 661c3ad56..c5ae62899 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -15,14 +15,13 @@
import ldap
import time
-from nova import config
from nova import exception
-from nova import flags
+from nova.network import dns_driver
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ldap_dns_opts = [
@@ -96,8 +95,9 @@ class DNSEntry(object):
if not entry:
return None
if len(entry) > 1:
- LOG.warn("Found multiple matches for domain %s.\n%s" %
- (domain, entry))
+            LOG.warn(_("Found multiple matches for domain "
+                       "%(domain)s.\n%(entry)s") %
+                     {'domain': domain, 'entry': entry})
return entry[0]
@classmethod
@@ -122,8 +122,10 @@ class DNSEntry(object):
if name.endswith(z):
dequalified = name[0:name.rfind(z)]
else:
- LOG.warn("Unable to dequalify. %s is not in %s.\n" %
- (name, self.qualified_domain))
+ LOG.warn(_("Unable to dequalify. %(name)s is not in "
+ "%(domain)s.\n") %
+ {'name': name,
+ 'domain': self.qualified_domain})
dequalified = None
return dequalified
@@ -299,7 +301,7 @@ class HostEntry(DNSEntry):
parent = property(_parent)
-class LdapDNS(object):
+class LdapDNS(dns_driver.DNSDriver):
"""Driver for PowerDNS using ldap as a back end.
This driver assumes ldap-method=strict, with all domains
@@ -358,5 +360,5 @@ class LdapDNS(object):
dEntry.delete()
def delete_dns_file(self):
- LOG.warn("This shouldn't be getting called except during testing.")
+ LOG.warn(_("This shouldn't be getting called except during testing."))
pass
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 994a3c0f4..e6abde609 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -24,15 +24,14 @@ import inspect
import netaddr
import os
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
@@ -44,7 +43,7 @@ linux_net_opts = [
default='/etc/nova/nova-dhcpbridge.conf',
help='location of flagfile for dhcpbridge'),
cfg.StrOpt('networks_path',
- default='$state_path/networks',
+ default=paths.state_path_def('networks'),
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
@@ -53,7 +52,7 @@ linux_net_opts = [
default=None,
help='MTU setting for vlan'),
cfg.StrOpt('dhcpbridge',
- default='$bindir/nova-dhcpbridge',
+ default=paths.bindir_def('nova-dhcpbridge'),
help='location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
@@ -86,10 +85,19 @@ linux_net_opts = [
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
+ cfg.StrOpt('metadata_host',
+ default='$my_ip',
+ help='the ip for the metadata api server'),
+ cfg.IntOpt('metadata_port',
+ default=8775,
+ help='the port for the metadata api port'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(linux_net_opts)
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
@@ -280,7 +288,8 @@ class IptablesManager(object):
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
- 'nat': IptablesTable()}
+ 'nat': IptablesTable(),
+ 'mangle': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
self.iptables_apply_deferred = False
@@ -301,7 +310,8 @@ class IptablesManager(object):
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
- 'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
+ 'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING'],
+ 'mangle': ['POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
@@ -637,12 +647,18 @@ def remove_floating_forward(floating_ip, fixed_ip, device):
def floating_forward_rules(floating_ip, fixed_ip, device):
+ rules = []
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
- rule += ' -o %s' % device
- return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('float-snat', rule)]
+ rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
+ rules.append(('float-snat', rule + ' -o %s' % device))
+ else:
+ rules.append(('float-snat', rule))
+ rules.append(
+ ('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ rules.append(
+ ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
+ return rules
def initialize_gateway_device(dev, network_ref):
@@ -723,6 +739,15 @@ def get_dhcp_hosts(context, network_ref):
return '\n'.join(hosts)
+def get_dns_hosts(context, network_ref):
+ """Get network's DNS hosts in hosts format."""
+ hosts = []
+ for data in db.network_get_associated_fixed_ips(context,
+ network_ref['id']):
+ hosts.append(_host_dns(data))
+ return '\n'.join(hosts)
+
+
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
@@ -735,6 +760,36 @@ def _add_dnsmasq_accept_rules(dev):
iptables_manager.apply()
+def _remove_dnsmasq_accept_rules(dev):
+ """Remove DHCP and DNS traffic allowed through to dnsmasq."""
+ table = iptables_manager.ipv4['filter']
+ for port in [67, 53]:
+ for proto in ['udp', 'tcp']:
+ args = {'dev': dev, 'port': port, 'proto': proto}
+ table.remove_rule('INPUT',
+ '-i %(dev)s -p %(proto)s -m %(proto)s '
+ '--dport %(port)s -j ACCEPT' % args)
+ iptables_manager.apply()
+
+
+def _add_dhcp_mangle_rule(dev):
+ if not os.path.exists('/dev/vhost-net'):
+ return
+ table = iptables_manager.ipv4['mangle']
+ table.add_rule('POSTROUTING',
+ '-o %s -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill' % dev)
+ iptables_manager.apply()
+
+
+def _remove_dhcp_mangle_rule(dev):
+ table = iptables_manager.ipv4['mangle']
+ table.remove_rule('POSTROUTING',
+ '-o %s -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill' % dev)
+ iptables_manager.apply()
+
+
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
hosts = []
@@ -756,6 +811,7 @@ def get_dhcp_opts(context, network_ref):
default_gw_vif[instance_uuid] = vifs[0]['id']
for datum in data:
+ instance_uuid = datum['instance_uuid']
if instance_uuid in default_gw_vif:
# we don't want default gateway for this fixed ip
if default_gw_vif[instance_uuid] != datum['vif_id']:
@@ -773,6 +829,12 @@ def update_dhcp(context, dev, network_ref):
restart_dhcp(context, dev, network_ref)
+def update_dns(context, dev, network_ref):
+ hostsfile = _dhcp_file(dev, 'hosts')
+ write_to_file(hostsfile, get_dns_hosts(context, network_ref))
+ restart_dhcp(context, dev, network_ref)
+
+
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
@@ -789,6 +851,8 @@ def kill_dhcp(dev):
_execute('kill', '-9', pid, run_as_root=True)
else:
LOG.debug(_('Pid %d is stale, skip killing dnsmasq'), pid)
+ _remove_dnsmasq_accept_rules(dev)
+ _remove_dhcp_mangle_rule(dev)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
@@ -811,6 +875,9 @@ def restart_dhcp(context, dev, network_ref):
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0644)
+ if network_ref['multi_host']:
+ _add_dhcp_mangle_rule(dev)
+
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
@@ -833,7 +900,7 @@ def restart_dhcp(context, dev, network_ref):
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['env',
- 'FLAGFILE=%s' % CONF.dhcpbridge_flagfile,
+ 'CONFIG_FILE=%s' % CONF.dhcpbridge_flagfile,
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
@@ -851,6 +918,8 @@ def restart_dhcp(context, dev, network_ref):
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--leasefile-ro']
+ if network_ref['multi_host'] and not CONF.dns_server:
+ cmd += ['--no-hosts', '--addn-hosts=%s' % _dhcp_file(dev, 'hosts')]
if CONF.dns_server:
cmd += ['-h', '-R', '--server=%s' % CONF.dns_server]
@@ -938,6 +1007,12 @@ def _host_dhcp(data):
data['address'])
+def _host_dns(data):
+ return '%s\t%s.%s' % (data['address'],
+ data['instance_hostname'],
+ CONF.dhcp_domain)
+
+
def _host_dhcp_opts(data):
"""Return an empty gateway option."""
return '%s,%s' % (_host_dhcp_network(data), 3)
@@ -952,7 +1027,7 @@ def _execute(*cmd, **kwargs):
return utils.execute(*cmd, **kwargs)
-def _device_exists(device):
+def device_exists(device):
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False, run_as_root=True)
@@ -960,7 +1035,7 @@ def _device_exists(device):
def _dhcp_file(dev, kind):
- """Return path to a pid, leases or conf file for a bridge/device."""
+ """Return path to a pid, leases, hosts or conf file for a bridge/device."""
fileutils.ensure_tree(CONF.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (CONF.networks_path,
dev,
@@ -1021,12 +1096,12 @@ def _create_veth_pair(dev1_name, dev2_name):
deleting any previous devices with those names.
"""
for dev in [dev1_name, dev2_name]:
- if _device_exists(dev):
+ if device_exists(dev):
try:
utils.execute('ip', 'link', 'delete', dev1_name,
run_as_root=True, check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
- LOG.exception("Error clearing stale veth %s" % dev)
+ LOG.exception(_("Error clearing stale veth %s") % dev)
utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
'name', dev2_name, run_as_root=True)
@@ -1065,19 +1140,21 @@ def get_dev(network):
class LinuxNetInterfaceDriver(object):
- """Abstract class that defines generic network host API"""
- """ for for all Linux interface drivers."""
+ """
+ Abstract class that defines generic network host API
+ for for all Linux interface drivers.
+ """
def plug(self, network, mac_address):
- """Create Linux device, return device name"""
+ """Create Linux device, return device name."""
raise NotImplementedError()
def unplug(self, network):
- """Destory Linux device, return device name"""
+ """Destory Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
- """Get device name"""
+ """Get device name."""
raise NotImplementedError()
@@ -1085,14 +1162,16 @@ class LinuxNetInterfaceDriver(object):
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
- if network.get('vlan', None) is not None:
+ vlan = network.get('vlan')
+ if vlan is not None:
iface = CONF.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
- network['vlan'],
+ vlan,
network['bridge'],
iface,
network,
mac_address)
+ iface = 'vlan%s' % vlan
else:
iface = CONF.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
@@ -1100,11 +1179,27 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
iface,
network, gateway)
+ if CONF.share_dhcp_address:
+ isolate_dhcp_address(iface, network['dhcp_server'])
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
- def unplug(self, network):
+ def unplug(self, network, gateway=True):
+ vlan = network.get('vlan')
+ if vlan is not None:
+ iface = 'vlan%s' % vlan
+ LinuxBridgeInterfaceDriver.remove_vlan_bridge(vlan,
+ network['bridge'])
+ else:
+ iface = CONF.flat_interface or network['bridge_interface']
+ LinuxBridgeInterfaceDriver.remove_bridge(network['bridge'],
+ gateway)
+
+ if CONF.share_dhcp_address:
+ remove_isolate_dhcp_address(iface, network['dhcp_server'])
+
+ iptables_manager.apply()
return self.get_dev(network)
def get_dev(self, network):
@@ -1120,11 +1215,17 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
return interface
@classmethod
- @lockutils.synchronized('ensure_vlan', 'nova-', external=True)
+ def remove_vlan_bridge(cls, vlan_num, bridge):
+ """Delete a bridge and vlan."""
+ LinuxBridgeInterfaceDriver.remove_bridge(bridge)
+ LinuxBridgeInterfaceDriver.remove_vlan(vlan_num)
+
+ @classmethod
+ @lockutils.synchronized('lock_vlan', 'nova-', external=True)
def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
- if not _device_exists(interface):
+ if not device_exists(interface):
LOG.debug(_('Starting VLAN inteface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
@@ -1145,12 +1246,32 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
return interface
@classmethod
- @lockutils.synchronized('ensure_bridge', 'nova-', external=True)
- def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
+ @lockutils.synchronized('lock_vlan', 'nova-', external=True)
+ def remove_vlan(cls, vlan_num):
+ """Delete a vlan."""
+ vlan_interface = 'vlan%s' % vlan_num
+ if not device_exists(vlan_interface):
+ return
+ else:
+ try:
+ utils.execute('ip', 'link', 'delete', vlan_interface,
+ run_as_root=True, check_exit_code=[0, 2, 254])
+ except exception.ProcessExecutionError:
+ LOG.error(_("Failed unplugging VLAN interface '%s'"),
+ vlan_interface)
+ raise
+ LOG.debug(_("Unplugged VLAN interface '%s'"), vlan_interface)
+
+ @classmethod
+ @lockutils.synchronized('lock_bridge', 'nova-', external=True)
+ def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True,
+ filtering=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
+ :param gateway: whether or not the bridge is a gateway.
+ :param filtering: whether or not to create filters on the bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
@@ -1160,8 +1281,8 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
interface onto the bridge and reset the default gateway if necessary.
"""
- if not _device_exists(bridge):
- LOG.debug(_('Starting Bridge interface for %s'), interface)
+ if not device_exists(bridge):
+ LOG.debug(_('Starting Bridge %s'), bridge)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
@@ -1173,6 +1294,8 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
+ msg = _('Adding interface %(interface)s to bridge %(bridge)s')
+ LOG.debug(msg % locals())
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
@@ -1207,18 +1330,115 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
- # Don't forward traffic unless we were told to be a gateway
- ipv4_filter = iptables_manager.ipv4['filter']
- if gateway:
- ipv4_filter.add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- ipv4_filter.add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
+ if filtering:
+ # Don't forward traffic unless we were told to be a gateway
+ ipv4_filter = iptables_manager.ipv4['filter']
+ if gateway:
+ ipv4_filter.add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % bridge)
+ ipv4_filter.add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % bridge)
+ else:
+ ipv4_filter.add_rule('FORWARD',
+ '--in-interface %s -j DROP' % bridge)
+ ipv4_filter.add_rule('FORWARD',
+ '--out-interface %s -j DROP' % bridge)
+
+ @classmethod
+ @lockutils.synchronized('lock_bridge', 'nova-', external=True)
+ def remove_bridge(cls, bridge, gateway=True, filtering=True):
+ """Delete a bridge."""
+ if not device_exists(bridge):
+ return
else:
- ipv4_filter.add_rule('FORWARD',
- '--in-interface %s -j DROP' % bridge)
- ipv4_filter.add_rule('FORWARD',
- '--out-interface %s -j DROP' % bridge)
+ if filtering:
+ ipv4_filter = iptables_manager.ipv4['filter']
+ if gateway:
+ ipv4_filter.remove_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % bridge)
+ ipv4_filter.remove_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % bridge)
+ else:
+ ipv4_filter.remove_rule('FORWARD',
+ '--in-interface %s -j DROP' % bridge)
+ ipv4_filter.remove_rule('FORWARD',
+ '--out-interface %s -j DROP' % bridge)
+ try:
+ utils.execute('ip', 'link', 'delete', bridge, run_as_root=True,
+ check_exit_code=[0, 2, 254])
+ except exception.ProcessExecutionError:
+ LOG.error(_("Failed unplugging bridge interface '%s'"), bridge)
+ raise
+
+ LOG.debug(_("Unplugged bridge interface '%s'"), bridge)
+
+
+@lockutils.synchronized('ebtables', 'nova-', external=True)
+def ensure_ebtables_rules(rules):
+ for rule in rules:
+ cmd = ['ebtables', '-D'] + rule.split()
+ _execute(*cmd, check_exit_code=False, run_as_root=True)
+ cmd[1] = '-I'
+ _execute(*cmd, run_as_root=True)
+
+
+@lockutils.synchronized('ebtables', 'nova-', external=True)
+def remove_ebtables_rules(rules):
+ for rule in rules:
+ cmd = ['ebtables', '-D'] + rule.split()
+ _execute(*cmd, check_exit_code=False, run_as_root=True)
+
+
+def isolate_dhcp_address(interface, address):
+ # block arp traffic to address accross the interface
+ rules = []
+ rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
+ % (interface, address))
+ rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
+ % (interface, address))
+ # NOTE(vish): the above is not possible with iptables/arptables
+ ensure_ebtables_rules(rules)
+ # block dhcp broadcast traffic across the interface
+ ipv4_filter = iptables_manager.ipv4['filter']
+ ipv4_filter.add_rule('FORWARD',
+ '-m physdev --physdev-in %s -d 255.255.255.255 '
+ '-p udp --dport 67 -j DROP' % interface, top=True)
+ ipv4_filter.add_rule('FORWARD',
+ '-m physdev --physdev-out %s -d 255.255.255.255 '
+ '-p udp --dport 67 -j DROP' % interface, top=True)
+ # block ip traffic to address accross the interface
+ ipv4_filter.add_rule('FORWARD',
+ '-m physdev --physdev-in %s -d %s -j DROP'
+ % (interface, address), top=True)
+ ipv4_filter.add_rule('FORWARD',
+ '-m physdev --physdev-out %s -s %s -j DROP'
+ % (interface, address), top=True)
+
+
+def remove_isolate_dhcp_address(interface, address):
+ # block arp traffic to address accross the interface
+ rules = []
+ rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
+ % (interface, address))
+ rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
+ % (interface, address))
+ remove_ebtables_rules(rules)
+ # NOTE(vish): the above is not possible with iptables/arptables
+ # block dhcp broadcast traffic across the interface
+ ipv4_filter = iptables_manager.ipv4['filter']
+ ipv4_filter.remove_rule('FORWARD',
+ '-m physdev --physdev-in %s -d 255.255.255.255 '
+ '-p udp --dport 67 -j DROP' % interface, top=True)
+ ipv4_filter.remove_rule('FORWARD',
+ '-m physdev --physdev-out %s -d 255.255.255.255 '
+ '-p udp --dport 67 -j DROP' % interface, top=True)
+ # block ip traffic to address accross the interface
+ ipv4_filter.remove_rule('FORWARD',
+ '-m physdev --physdev-in %s -d %s -j DROP'
+ % (interface, address), top=True)
+ ipv4_filter.remove_rule('FORWARD',
+ '-m physdev --physdev-out %s -s %s -j DROP'
+ % (interface, address), top=True)
# plugs interfaces using Open vSwitch
@@ -1226,7 +1446,7 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
- if not _device_exists(dev):
+ if not device_exists(dev):
bridge = CONF.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
'--', '--may-exist', 'add-port', bridge, dev,
@@ -1304,7 +1524,7 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address)
- if not _device_exists(bridge):
+ if not device_exists(bridge):
LOG.debug(_("Starting bridge %s "), bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
@@ -1325,7 +1545,7 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def unplug(self, network):
dev = self.get_dev(network)
- if not _device_exists(dev):
+ if not device_exists(dev):
return None
else:
try:
@@ -1339,7 +1559,7 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@classmethod
def create_tap_dev(_self, dev, mac_address=None):
- if not _device_exists(dev):
+ if not device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
diff --git a/nova/network/manager.py b/nova/network/manager.py
index d032fe159..ccdac6f60 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -49,30 +49,33 @@ import itertools
import math
import re
import socket
+import uuid
from eventlet import greenpool
import netaddr
from nova.compute import api as compute_api
-from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova import ipv6
from nova import manager
from nova.network import api as network_api
+from nova.network import driver
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
+from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
+from nova import servicegroup
from nova import utils
@@ -147,22 +150,54 @@ network_opts = [
cfg.StrOpt('network_host',
default=socket.getfqdn(),
help='Network host to use for ip allocation in flat modes'),
+ cfg.BoolOpt('fake_network',
+ default=False,
+ help='If passed, use fake network devices and addresses'),
cfg.BoolOpt('fake_call',
default=False,
help='If True, skip using the queue and make local calls'),
+ cfg.BoolOpt('teardown_unused_network_gateway',
+ default=False,
+ help='If True, unused gateway devices (VLAN and bridge) are '
+ 'deleted in VLAN network mode with multi hosted '
+ 'networks'),
cfg.BoolOpt('force_dhcp_release',
default=False,
help='If True, send a dhcp release on instance termination'),
+ cfg.BoolOpt('share_dhcp_address',
+ default=False,
+ help='If True in multi_host mode, all compute hosts share '
+ 'the same dhcp address.'),
+ cfg.BoolOpt('update_dns_entries',
+ default=False,
+ help='If True, when a DNS entry must be updated, it sends a '
+ 'fanout cast to all network hosts to update their DNS '
+ 'entries in multi host mode'),
+ cfg.IntOpt("dns_update_periodic_interval",
+ default=-1,
+ help='Number of seconds to wait between runs of updates to DNS '
+ 'entries.'),
cfg.StrOpt('dhcp_domain',
default='novalocal',
help='domain to use for building the hostnames'),
cfg.StrOpt('l3_lib',
default='nova.network.l3.LinuxNetL3',
- help="Indicates underlying L3 management library")
+ help="Indicates underlying L3 management library"),
+ cfg.StrOpt('instance_dns_manager',
+ default='nova.network.noop_dns_driver.NoopDNSDriver',
+ help='full class name for the DNS Manager for instance IPs'),
+ cfg.StrOpt('instance_dns_domain',
+ default='',
+ help='full class name for the DNS Zone for instance IPs'),
+ cfg.StrOpt('floating_ip_dns_manager',
+ default='nova.network.noop_dns_driver.NoopDNSDriver',
+ help='full class name for the DNS Manager for floating IPs'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(network_opts)
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
class RPCAllocateFixedIP(object):
@@ -171,6 +206,9 @@ class RPCAllocateFixedIP(object):
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
+
+ servicegroup_api = None
+
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
@@ -233,7 +271,7 @@ class RPCAllocateFixedIP(object):
service = self.db.service_get_by_host_and_topic(context,
host,
'network')
- if not service or not utils.service_is_up(service):
+ if not service or not self.servicegroup_api.service_is_up(service):
# NOTE(vish): deallocate the fixed ip locally but don't
# teardown network devices
return super(RPCAllocateFixedIP, self).deallocate_fixed_ip(
@@ -243,7 +281,7 @@ class RPCAllocateFixedIP(object):
def wrap_check_policy(func):
- """Check policy corresponding to the wrapped methods prior to execution"""
+ """Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
@@ -265,6 +303,9 @@ def check_policy(context, action):
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
+
+ servicegroup_api = None
+
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
@@ -382,7 +423,7 @@ class FloatingIP(object):
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
- """Raises if floating ip does not belong to project"""
+ """Raises if floating ip does not belong to project."""
if context.is_admin:
return
@@ -437,6 +478,7 @@ class FloatingIP(object):
return floating_ip
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
@wrap_check_policy
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
@@ -483,6 +525,7 @@ class FloatingIP(object):
if reservations:
QUOTAS.commit(context, reservations)
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
@wrap_check_policy
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
@@ -506,6 +549,9 @@ class FloatingIP(object):
# find previously associated instance
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'])
+ if fixed_ip['address'] == fixed_address:
+ # NOTE(vish): already associated to this address
+ return
orig_instance_uuid = fixed_ip['instance_uuid']
self.disassociate_floating_ip(context, floating_address)
@@ -526,39 +572,50 @@ class FloatingIP(object):
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
- fixed_address, interface)
+ fixed_address, interface,
+ fixed_ip['instance_uuid'])
else:
# send to correct host
self.network_rpcapi._associate_floating_ip(context,
- floating_address, fixed_address, interface, host)
+ floating_address, fixed_address, interface, host,
+ fixed_ip['instance_uuid'])
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
- interface):
- """Performs db and driver calls to associate floating ip & fixed ip"""
- # associate floating ip
- self.db.floating_ip_fixed_ip_associate(context,
- floating_address,
- fixed_address,
- self.host)
- try:
- # gogo driver time
- self.l3driver.add_floating_ip(floating_address, fixed_address,
- interface)
- except exception.ProcessExecutionError as e:
- fixed_address = self.db.floating_ip_disassociate(context,
- floating_address)
- if "Cannot find device" in str(e):
- LOG.error(_('Interface %(interface)s not found'), locals())
- raise exception.NoFloatingIpInterface(interface=interface)
- payload = dict(project_id=context.project_id,
- floating_ip=floating_address)
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.associate',
- notifier.INFO, payload=payload)
+ interface, instance_uuid):
+ """Performs db and driver calls to associate floating ip & fixed ip."""
+
+ @lockutils.synchronized(unicode(floating_address), 'nova-')
+ def do_associate():
+ # associate floating ip
+ res = self.db.floating_ip_fixed_ip_associate(context,
+ floating_address,
+ fixed_address,
+ self.host)
+ if not res:
+ # NOTE(vish): ip was already associated
+ return
+ try:
+ # gogo driver time
+ self.l3driver.add_floating_ip(floating_address, fixed_address,
+ interface)
+ except exception.ProcessExecutionError as e:
+ self.db.floating_ip_disassociate(context, floating_address)
+ if "Cannot find device" in str(e):
+ LOG.error(_('Interface %(interface)s not found'), locals())
+ raise exception.NoFloatingIpInterface(interface=interface)
+ payload = dict(project_id=context.project_id,
+ instance_id=instance_uuid,
+ floating_ip=floating_address)
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.associate',
+ notifier.INFO, payload=payload)
+ do_associate()
+
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
@wrap_check_policy
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
@@ -591,7 +648,7 @@ class FloatingIP(object):
fixed_ip['instance_uuid'])
service = self.db.service_get_by_host_and_topic(
context.elevated(), instance['host'], 'network')
- if service and utils.service_is_up(service):
+ if service and self.servicegroup_api.service_is_up(service):
host = instance['host']
else:
# NOTE(vish): if the service is down just deallocate the data
@@ -605,53 +662,73 @@ class FloatingIP(object):
if host == self.host:
# i'm the correct host
- self._disassociate_floating_ip(context, address, interface)
+ self._disassociate_floating_ip(context, address, interface,
+ fixed_ip['instance_uuid'])
else:
# send to correct host
self.network_rpcapi._disassociate_floating_ip(context, address,
- interface, host)
+ interface, host, fixed_ip['instance_uuid'])
- def _disassociate_floating_ip(self, context, address, interface):
- """Performs db and driver calls to disassociate floating ip"""
+ def _disassociate_floating_ip(self, context, address, interface,
+ instance_uuid):
+ """Performs db and driver calls to disassociate floating ip."""
# disassociate floating ip
- fixed_address = self.db.floating_ip_disassociate(context, address)
- if interface:
- # go go driver time
- self.l3driver.remove_floating_ip(address, fixed_address, interface)
- payload = dict(project_id=context.project_id, floating_ip=address)
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.disassociate',
- notifier.INFO, payload=payload)
+ @lockutils.synchronized(unicode(address), 'nova-')
+ def do_disassociate():
+ # NOTE(vish): Note that we are disassociating in the db before we
+ # actually remove the ip address on the host. We are
+ # safe from races on this host due to the decorator,
+ # but another host might grab the ip right away. We
+ # don't worry about this case because the miniscule
+ # window where the ip is on both hosts shouldn't cause
+ # any problems.
+ fixed_address = self.db.floating_ip_disassociate(context, address)
+
+ if not fixed_address:
+ # NOTE(vish): ip was already disassociated
+ return
+ if interface:
+ # go go driver time
+ self.l3driver.remove_floating_ip(address, fixed_address,
+ interface)
+ payload = dict(project_id=context.project_id,
+ instance_id=instance_uuid,
+ floating_ip=address)
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.disassociate',
+ notifier.INFO, payload=payload)
+ do_disassociate()
+ @rpc_common.client_exceptions(exception.FloatingIpNotFound)
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get(context, id).iteritems())
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
@@ -664,11 +741,13 @@ class FloatingIP(object):
return False if floating_ip.get('fixed_ip_id') else True
@wrap_check_policy
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance_uuid,
+ floating_addresses,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
- if not floating_addresses or source == dest:
+ if not floating_addresses or (source and source == dest):
return
LOG.info(_("Starting migration network for instance"
@@ -697,11 +776,15 @@ class FloatingIP(object):
{'host': None})
@wrap_check_policy
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_finish(self, context, instance_uuid,
+ floating_addresses, host=None,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
- if not floating_addresses or source == dest:
+ if host and not dest:
+ dest = host
+ if not floating_addresses or (source and source == dest):
return
LOG.info(_("Finishing migration network for instance"
@@ -730,14 +813,14 @@ class FloatingIP(object):
def _prepare_domain_entry(self, context, domain):
domainref = self.db.dnsdomain_get(context, domain)
- scope = domainref.scope
+ scope = domainref['scope']
if scope == 'private':
- av_zone = domainref.availability_zone
+ av_zone = domainref['availability_zone']
this_domain = {'domain': domain,
'scope': scope,
'availability_zone': av_zone}
else:
- project = domainref.project_id
+ project = domainref['project_id']
this_domain = {'domain': domain,
'scope': scope,
'project': project}
@@ -841,7 +924,7 @@ class NetworkManager(manager.SchedulerDependentManager):
The one at a time part is to flatten the layout to help scale
"""
- RPC_API_VERSION = '1.2'
+ RPC_API_VERSION = '1.6'
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
@@ -857,9 +940,7 @@ class NetworkManager(manager.SchedulerDependentManager):
required_create_args = []
def __init__(self, network_driver=None, *args, **kwargs):
- if not network_driver:
- network_driver = CONF.network_driver
- self.driver = importutils.import_module(network_driver)
+ self.driver = driver.load_network_driver(network_driver)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
self.instance_dns_domain = CONF.instance_dns_domain
@@ -870,6 +951,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self.security_group_api = compute_api.SecurityGroupAPI()
self.compute_api = compute_api.API(
security_group_api=self.security_group_api)
+ self.servicegroup_api = servicegroup.API()
# NOTE(tr3buchet: unless manager subclassing NetworkManager has
# already imported ipam, import nova ipam here
@@ -888,7 +970,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): this is for compatibility
- if not network_ref.get('multi_host'):
+ if not network_ref.get('multi_host') or CONF.share_dhcp_address:
return network_ref['gateway']
if not host:
@@ -906,7 +988,7 @@ class NetworkManager(manager.SchedulerDependentManager):
host=host)
def get_dhcp_leases(self, ctxt, network_ref):
- """Broker the request to the driver to fetch the dhcp leases"""
+ """Broker the request to the driver to fetch the dhcp leases."""
return self.driver.get_dhcp_leases(ctxt, network_ref)
def init_host(self):
@@ -918,6 +1000,9 @@ class NetworkManager(manager.SchedulerDependentManager):
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
self._setup_network_on_host(ctxt, network)
+ if CONF.update_dns_entries:
+ dev = self.driver.get_dev(network)
+ self.driver.update_dns(ctxt, dev, network)
@manager.periodic_task
def _disassociate_stale_fixed_ips(self, context):
@@ -958,6 +1043,19 @@ class NetworkManager(manager.SchedulerDependentManager):
self.security_group_api.trigger_handler('security_group_members',
admin_context, group_ids)
+ def _do_trigger_security_group_handler(self, handler, instance_id):
+ admin_context = context.get_admin_context(read_deleted="yes")
+ if uuidutils.is_uuid_like(instance_id):
+ instance_ref = self.db.instance_get_by_uuid(admin_context,
+ instance_id)
+ else:
+ instance_ref = self.db.instance_get(admin_context,
+ instance_id)
+ for group_name in [group['name'] for group
+ in instance_ref['security_groups']]:
+ self.security_group_api.trigger_handler(handler, admin_context,
+ instance_ref, group_name)
+
def get_floating_ips_by_fixed_address(self, context, fixed_address):
# NOTE(jkoelker) This is just a stub function. Managers supporting
# floating ips MUST override this or use the Mixin
@@ -1058,6 +1156,11 @@ class NetworkManager(manager.SchedulerDependentManager):
self._allocate_fixed_ips(admin_context, instance_id,
host, networks, vpn=vpn,
requested_networks=requested_networks)
+
+ if CONF.update_dns_entries:
+ network_ids = [network['id'] for network in networks]
+ self.network_rpcapi.update_dns(context, network_ids)
+
return self.get_instance_nw_info(context, instance_id, instance_uuid,
rxtx_factor, host)
@@ -1075,6 +1178,7 @@ class NetworkManager(manager.SchedulerDependentManager):
instance_id = kwargs.pop('instance_id')
instance = self.db.instance_get(read_deleted_context, instance_id)
+ host = kwargs.get('host')
try:
fixed_ips = (kwargs.get('fixed_ips') or
@@ -1086,8 +1190,11 @@ class NetworkManager(manager.SchedulerDependentManager):
context=read_deleted_context)
# deallocate fixed ips
for fixed_ip in fixed_ips:
- self.deallocate_fixed_ip(context, fixed_ip['address'],
- host=kwargs.get('host'))
+ self.deallocate_fixed_ip(context, fixed_ip['address'], host=host)
+
+ if CONF.update_dns_entries:
+ network_ids = [fixed_ip['network_id'] for fixed_ip in fixed_ips]
+ self.network_rpcapi.update_dns(context, network_ids)
# deallocate vifs (mac addresses)
self.db.virtual_interface_delete_by_instance(read_deleted_context,
@@ -1124,6 +1231,7 @@ class NetworkManager(manager.SchedulerDependentManager):
nw_info = network_model.NetworkInfo()
for vif in vifs:
vif_dict = {'id': vif['uuid'],
+ 'type': network_model.VIF_TYPE_BRIDGE,
'address': vif['address']}
# handle case where vif doesn't have a network
@@ -1193,7 +1301,7 @@ class NetworkManager(manager.SchedulerDependentManager):
return nw_info
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
@@ -1208,7 +1316,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def _get_subnets_from_network(self, context, network,
vif, instance_host=None):
- """Returns the 1 or 2 possible subnets for a nova network"""
+ """Returns the 1 or 2 possible subnets for a nova network."""
# get subnets
ipam_subnets = self.ipam.get_subnets_by_net_id(context,
network['project_id'], network['uuid'], vif['uuid'])
@@ -1263,7 +1371,7 @@ class NetworkManager(manager.SchedulerDependentManager):
vif = {'address': utils.generate_mac_address(),
'instance_uuid': instance_uuid,
'network_id': network_id,
- 'uuid': str(utils.gen_uuid())}
+ 'uuid': str(uuid.uuid4())}
# try FLAG times to create a vif record with a unique mac_address
for i in xrange(CONF.create_unique_mac_address_attempts):
try:
@@ -1284,6 +1392,10 @@ class NetworkManager(manager.SchedulerDependentManager):
network = self._get_network_by_id(context, network_id)
self._allocate_fixed_ips(context, instance_id, host, [network])
+ def get_backdoor_port(self, context):
+ """Return backdoor port for eventlet_backdoor."""
+ return self.backdoor_port
+
@wrap_check_policy
def remove_fixed_ip_from_instance(self, context, instance_id, host,
address):
@@ -1294,7 +1406,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self.deallocate_fixed_ip(context, address, host)
return
raise exception.FixedIpNotFoundForSpecificInstance(
- instance_id=instance_id, ip=address)
+ instance_uuid=instance_id, ip=address)
def _validate_instance_zone_for_dns_domain(self, context, instance):
instance_zone = instance.get('availability_zone')
@@ -1339,6 +1451,8 @@ class NetworkManager(manager.SchedulerDependentManager):
instance_ref['uuid'])
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
+ self._do_trigger_security_group_handler(
+ 'instance_add_security_group', instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_ref['uuid'], network['id'])
values = {'allocated': True,
@@ -1368,6 +1482,8 @@ class NetworkManager(manager.SchedulerDependentManager):
self._do_trigger_security_group_members_refresh_for_instance(
instance['uuid'])
+ self._do_trigger_security_group_handler(
+ 'instance_remove_security_group', instance['uuid'])
if self._validate_instance_zone_for_dns_domain(context, instance):
for n in self.instance_dns_manager.get_entries_by_address(address,
@@ -1375,10 +1491,13 @@ class NetworkManager(manager.SchedulerDependentManager):
self.instance_dns_manager.delete_entry(n,
self.instance_dns_domain)
+ self.db.fixed_ip_update(context, address,
+ {'allocated': False,
+ 'virtual_interface_id': None})
+
if teardown:
network = self._get_network_by_id(context,
fixed_ip_ref['network_id'])
- self._teardown_network_on_host(context, network)
if CONF.force_dhcp_release:
dev = self.driver.get_dev(network)
@@ -1401,9 +1520,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# callback will get called by nova-dhcpbridge.
self.driver.release_dhcp(dev, address, vif['address'])
- self.db.fixed_ip_update(context, address,
- {'allocated': False,
- 'virtual_interface_id': None})
+ self._teardown_network_on_host(context, network)
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
@@ -1493,7 +1610,7 @@ class NetworkManager(manager.SchedulerDependentManager):
fixnet = netaddr.IPNetwork(kwargs["cidr"])
each_subnet_size = fixnet.size / kwargs["num_networks"]
if each_subnet_size > CONF.network_size:
- subnet = 32 - int(math.log(CONF.network_size_size, 2))
+ subnet = 32 - int(math.log(CONF.network_size, 2))
oversize_msg = _(
'Subnet(s) too large, defaulting to /%s.'
' To override, specify network_size flag.') % subnet
@@ -1632,8 +1749,6 @@ class NetworkManager(manager.SchedulerDependentManager):
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
- del net['dns1']
- del net['dns2']
vlan = kwargs['vlan_start'] + index
net['vpn_private_address'] = str(subnet_v4[2])
net['dhcp_start'] = str(subnet_v4[3])
@@ -1712,7 +1827,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def setup_networks_on_host(self, context, instance_id, host,
teardown=False):
- """calls setup/teardown on network hosts associated with an instance"""
+ """Calls setup/teardown on network hosts for an instance."""
green_pool = greenpool.GreenPool()
if teardown:
@@ -1802,14 +1917,14 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_vifs_by_instance(self, context, instance_id):
- """Returns the vifs associated with an instance"""
+ """Returns the vifs associated with an instance."""
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
return [dict(vif.iteritems()) for vif in vifs]
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if floating_ip['fixed_ip_id'] is None:
return None
@@ -1829,7 +1944,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_network(self, context, network_uuid):
network = self.db.network_get_by_uuid(context.elevated(), network_uuid)
- return dict(network.iteritems())
+ return jsonutils.to_primitive(network)
@wrap_check_policy
def get_all_networks(self, context):
@@ -1837,7 +1952,7 @@ class NetworkManager(manager.SchedulerDependentManager):
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
return []
- return [dict(network.iteritems()) for network in networks]
+ return [jsonutils.to_primitive(network) for network in networks]
@wrap_check_policy
def disassociate_network(self, context, network_uuid):
@@ -1846,20 +1961,44 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_fixed_ip(self, context, id):
- """Return a fixed ip"""
+ """Return a fixed ip."""
fixed = self.db.fixed_ip_get(context, id)
- return dict(fixed.iteritems())
+ return jsonutils.to_primitive(fixed)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
fixed = self.db.fixed_ip_get_by_address(context, address)
- return dict(fixed.iteritems())
+ return jsonutils.to_primitive(fixed)
def get_vif_by_mac_address(self, context, mac_address):
- """Returns the vifs record for the mac_address"""
+ """Returns the vifs record for the mac_address."""
return self.db.virtual_interface_get_by_address(context,
mac_address)
+ @manager.periodic_task(
+ spacing=CONF.dns_update_periodic_interval)
+ def _periodic_update_dns(self, context):
+ """Update local DNS entries of all networks on this host."""
+ networks = self.db.network_get_all_by_host(context, self.host)
+ for network in networks:
+ dev = self.driver.get_dev(network)
+ self.driver.update_dns(context, dev, network)
+
+ def update_dns(self, context, network_ids):
+ """Called when fixed IP is allocated or deallocated."""
+ if CONF.fake_network:
+ return
+
+ for network_id in network_ids:
+ network = self.db.network_get(context, network_id)
+ if not network['multi_host']:
+ continue
+ host_networks = self.db.network_get_all_by_host(context, self.host)
+ for host_network in host_networks:
+ if host_network['id'] == network_id:
+ dev = self.driver.get_dev(network)
+ self.driver.update_dns(context, dev, network)
+
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
@@ -1932,35 +2071,43 @@ class FlatManager(NetworkManager):
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
return {}
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
return []
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
return []
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance_uuid,
+ floating_addresses,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
+ pass
+
+ def migrate_instance_finish(self, context, instance_uuid,
+ floating_addresses, host=None,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
pass
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def update_dns(self, context, network_ids):
+ """Called when fixed IP is allocated or deallocated."""
pass
@@ -1993,7 +2140,9 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
if not CONF.fake_network:
dev = self.driver.get_dev(network)
- self.driver.update_dhcp(context, dev, network)
+ # NOTE(dprince): dhcp DB queries require elevated context
+ elevated = context.elevated()
+ self.driver.update_dhcp(elevated, dev, network)
if(CONF.use_ipv6):
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
@@ -2004,10 +2153,12 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
if not CONF.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
- self.driver.update_dhcp(context, dev, network)
+ # NOTE(dprince): dhcp DB queries require elevated context
+ elevated = context.elevated()
+ self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
@@ -2104,6 +2255,27 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
network_id = None
self.db.network_associate(context, project_id, network_id, force=True)
+ @wrap_check_policy
+ def associate(self, context, network_uuid, associations):
+ """Associate or disassociate host or project to network."""
+ network_id = self.get_network(context, network_uuid)['id']
+ if 'host' in associations:
+ host = associations['host']
+ if host is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=True,
+ disassociate_project=False)
+ else:
+ self.db.network_set_host(context, network_id, host)
+ if 'project' in associations:
+ project = associations['project']
+ if project is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=False,
+ disassociate_project=True)
+ else:
+ self.db.network_associate(context, project, network_id, True)
+
def _get_network_by_id(self, context, network_id):
# NOTE(vish): Don't allow access to networks with project_id=None as
# these are networksa that haven't been allocated to a
@@ -2132,24 +2304,30 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Create networks based on parameters."""
self._convert_int_args(kwargs)
+ kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start
+ kwargs["num_networks"] = (kwargs.get("num_networks") or
+ CONF.num_networks)
+ kwargs["network_size"] = (kwargs.get("network_size") or
+ CONF.network_size)
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
- if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
+ if kwargs["num_networks"] + kwargs["vlan_start"] > 4094:
raise ValueError(_('The sum between the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
# check that num networks and network size fits in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
- if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
- raise ValueError(_('The network range is not big enough to fit '
- '%(num_networks)s. Network size is %(network_size)s') %
- kwargs)
+ if fixed_net.size < kwargs['num_networks'] * kwargs['network_size']:
+ raise ValueError(_('The network range is not '
+ 'big enough to fit %(num_networks)s networks. Network '
+ 'size is %(network_size)s') % kwargs)
kwargs['bridge_interface'] = (kwargs.get('bridge_interface') or
CONF.vlan_interface)
return NetworkManager.create_networks(
self, context, vpn=True, **kwargs)
+ @lockutils.synchronized('setup_network', 'nova-', external=True)
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
if not network['vpn_public_address']:
@@ -2172,21 +2350,45 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
network['vpn_private_address'])
if not CONF.fake_network:
dev = self.driver.get_dev(network)
- self.driver.update_dhcp(context, dev, network)
+ # NOTE(dprince): dhcp DB queries require elevated context
+ elevated = context.elevated()
+ self.driver.update_dhcp(elevated, dev, network)
if(CONF.use_ipv6):
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
+ @lockutils.synchronized('setup_network', 'nova-', external=True)
def _teardown_network_on_host(self, context, network):
if not CONF.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
- self.driver.update_dhcp(context, dev, network)
+ # NOTE(dprince): dhcp DB queries require elevated context
+ elevated = context.elevated()
+ self.driver.update_dhcp(elevated, dev, network)
+
+ # NOTE(ethuleau): For multi-hosted networks, if the network is no
+ # longer used on this host and the VPN forwarding rules aren't handled
+ # by this host, we delete the network gateway.
+ vpn_address = network['vpn_public_address']
+ if (CONF.teardown_unused_network_gateway and
+ network['multi_host'] and vpn_address != CONF.vpn_ip and
+ not self.db.network_in_use_on_host(context, network['id'],
+ self.host)):
+ LOG.debug("Remove unused gateway %s", network['bridge'])
+ self.driver.kill_dhcp(dev)
+ self.l3driver.remove_gateway(network)
+ if not CONF.share_dhcp_address:
+ values = {'allocated': False,
+ 'host': None}
+ self.db.fixed_ip_update(context, network['dhcp_server'],
+ values)
+ else:
+ self.driver.update_dhcp(context, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(VlanManager, self)._get_network_dict(network)
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index 35e77f63a..c565f368e 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -16,28 +16,36 @@ import os
import shutil
import tempfile
-from nova import config
from nova import exception
-from nova import flags
+from nova.network import dns_driver
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
-CONF = config.CONF
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
-class MiniDNS(object):
- """ Trivial DNS driver. This will read/write to a local, flat file
- and have no effect on your actual DNS system. This class is
- strictly for testing purposes, and should keep you out of dependency
- hell.
- Note that there is almost certainly a race condition here that
- will manifest anytime instances are rapidly created and deleted.
- A proper implementation will need some manner of locking."""
+class MiniDNS(dns_driver.DNSDriver):
+ """
+ Trivial DNS driver. This will read/write to a local, flat file
+ and have no effect on your actual DNS system. This class is
+ strictly for testing purposes, and should keep you out of dependency
+ hell.
+
+ Note that there is almost certainly a race condition here that
+ will manifest anytime instances are rapidly created and deleted.
+ A proper implementation will need some manner of locking.
+ """
def __init__(self):
- if CONF.logdir:
- self.filename = os.path.join(CONF.logdir, "dnstest.txt")
+ if CONF.log_dir:
+ self.filename = os.path.join(CONF.log_dir, "dnstest.txt")
+ self.tempdir = None
else:
- self.filename = "dnstest.txt"
+ self.tempdir = tempfile.mkdtemp()
+ self.filename = os.path.join(self.tempdir, "dnstest.txt")
+ LOG.debug(_('minidns file is |%s|'), self.filename)
if not os.path.exists(self.filename):
f = open(self.filename, "w+")
@@ -49,7 +57,7 @@ class MiniDNS(object):
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
- if entry and entry['address'].lower() == 'domain'.lower():
+ if entry and entry['address'] == 'domain':
entries.append(entry['name'])
infile.close()
return entries
@@ -60,9 +68,11 @@ class MiniDNS(object):
else:
qualified = name
- return qualified
+ return qualified.lower()
def create_entry(self, name, address, type, domain):
+ if name is None:
+ raise exception.InvalidInput(_("Invalid name"))
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
@@ -82,9 +92,9 @@ class MiniDNS(object):
return None
else:
entry = {}
- entry['address'] = vals[0]
- entry['name'] = vals[1]
- entry['type'] = vals[2]
+ entry['address'] = vals[0].lower()
+ entry['name'] = vals[1].lower()
+ entry['type'] = vals[2].lower()
if entry['address'] == 'domain':
entry['domain'] = entry['name']
else:
@@ -92,13 +102,16 @@ class MiniDNS(object):
return entry
def delete_entry(self, name, domain):
+ if name is None:
+ raise exception.InvalidInput(_("Invalid name"))
+
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
- entry['name'] != self.qualify(name, domain).lower()):
+ entry['name'] != self.qualify(name, domain)):
outfile.write(line)
else:
deleted = True
@@ -106,6 +119,8 @@ class MiniDNS(object):
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
+ LOG.warn(_('Cannot delete entry |%s|'),
+ self.qualify(name, domain))
raise exception.NotFound
def modify_address(self, name, address, domain):
@@ -118,7 +133,7 @@ class MiniDNS(object):
for line in infile:
entry = self.parse_line(line)
if (entry and
- entry['name'].lower() == self.qualify(name, domain).lower()):
+ entry['name'] == self.qualify(name, domain)):
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), entry['type']))
else:
@@ -132,10 +147,12 @@ class MiniDNS(object):
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
- if entry and entry['address'].lower() == address.lower():
- if entry['name'].lower().endswith(domain.lower()):
- domain_index = entry['name'].lower().find(domain.lower())
- entries.append(entry['name'][0:domain_index - 1])
+ if entry and entry['address'] == address.lower():
+ if entry['name'].endswith(domain.lower()):
+ name = entry['name'].split(".")[0]
+ if name not in entries:
+ entries.append(name)
+
infile.close()
return entries
@@ -145,13 +162,23 @@ class MiniDNS(object):
for line in infile:
entry = self.parse_line(line)
if (entry and
- entry['name'].lower() == self.qualify(name, domain).lower()):
+ entry['name'] == self.qualify(name, domain)):
entries.append(entry['address'])
infile.close()
return entries
def delete_dns_file(self):
- os.remove(self.filename)
+ LOG.warn(_("This shouldn't be getting called except during testing."))
+ if os.path.exists(self.filename):
+ try:
+ os.remove(self.filename)
+ except OSError:
+ pass
+ if self.tempdir and os.path.exists(self.tempdir):
+ try:
+ shutil.rmtree(self.tempdir)
+ except OSError:
+ pass
def create_domain(self, fqdomain):
if self.get_entries_by_name(fqdomain, ''):
@@ -169,7 +196,7 @@ class MiniDNS(object):
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
- entry['domain'] != fqdomain):
+ entry['domain'] != fqdomain.lower()):
outfile.write(line)
else:
print "deleted %s" % entry
@@ -178,4 +205,5 @@ class MiniDNS(object):
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
+ LOG.warn(_('Cannot delete domain |%s|'), fqdomain)
raise exception.NotFound
diff --git a/nova/network/model.py b/nova/network/model.py
index fc20489b5..e4fe0d54c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -25,9 +25,21 @@ def ensure_string_keys(d):
# http://bugs.python.org/issue4978
return dict([(str(k), v) for k, v in d.iteritems()])
+# Constants for the 'vif_type' field in VIF class
+VIF_TYPE_OVS = 'ovs'
+VIF_TYPE_BRIDGE = 'bridge'
+VIF_TYPE_802_QBG = '802.1qbg'
+VIF_TYPE_802_QBH = '802.1qbh'
+VIF_TYPE_OTHER = 'other'
+
+# Constant for max length of network interface names
+# eg 'bridge' in the Network class or 'devname' in
+# the VIF class
+NIC_NAME_LEN = 14
+
class Model(dict):
- """Defines some necessary structures for most of the network models"""
+ """Defines some necessary structures for most of the network models."""
def __repr__(self):
return self.__class__.__name__ + '(' + dict.__repr__(self) + ')'
@@ -38,12 +50,12 @@ class Model(dict):
self['meta'].update(kwargs)
def get_meta(self, key, default=None):
- """calls get(key, default) on self['meta']"""
+ """calls get(key, default) on self['meta']."""
return self['meta'].get(key, default)
class IP(Model):
- """Represents an IP address in Nova"""
+ """Represents an IP address in Nova."""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
@@ -78,7 +90,7 @@ class IP(Model):
class FixedIP(IP):
- """Represents a Fixed IP address in Nova"""
+ """Represents a Fixed IP address in Nova."""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
@@ -102,7 +114,7 @@ class FixedIP(IP):
class Route(Model):
- """Represents an IP Route in Nova"""
+ """Represents an IP Route in Nova."""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
@@ -120,7 +132,7 @@ class Route(Model):
class Subnet(Model):
- """Represents a Subnet in Nova"""
+ """Represents a Subnet in Nova."""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
@@ -153,7 +165,7 @@ class Subnet(Model):
self['ips'].append(ip)
def as_netaddr(self):
- """Convience function to get cidr as a netaddr object"""
+ """Convenience function to get cidr as a netaddr object."""
return netaddr.IPNetwork(self['cidr'])
@classmethod
@@ -167,7 +179,7 @@ class Subnet(Model):
class Network(Model):
- """Represents a Network in Nova"""
+ """Represents a Network in Nova."""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
@@ -193,13 +205,16 @@ class Network(Model):
class VIF(Model):
- """Represents a Virtual Interface in Nova"""
- def __init__(self, id=None, address=None, network=None, **kwargs):
+ """Represents a Virtual Interface in Nova."""
+ def __init__(self, id=None, address=None, network=None, type=None,
+ devname=None, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
+ self['type'] = type
+ self['devname'] = devname
self._set_meta(kwargs)
@@ -256,16 +271,16 @@ class VIF(Model):
class NetworkInfo(list):
- """Stores and manipulates network information for a Nova instance"""
+ """Stores and manipulates network information for a Nova instance."""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
- """Returns all fixed_ips without floating_ips attached"""
+ """Returns all fixed_ips without floating_ips attached."""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
- """Returns all floating_ips"""
+ """Returns all floating_ips."""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
@@ -363,6 +378,8 @@ class NetworkInfo(list):
info_dict = {'label': network['label'],
'broadcast': str(subnet_v4.as_netaddr().broadcast),
'mac': vif['address'],
+ 'vif_type': vif['type'],
+ 'vif_devname': vif.get('devname'),
'vif_uuid': vif['id'],
'rxtx_cap': vif.get_meta('rxtx_cap', 0),
'dns': [get_ip(ip) for ip in subnet_v4['dns']],
diff --git a/nova/network/noop_dns_driver.py b/nova/network/noop_dns_driver.py
new file mode 100644
index 000000000..68a1862e6
--- /dev/null
+++ b/nova/network/noop_dns_driver.py
@@ -0,0 +1,49 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.network import dns_driver
+
+
+class NoopDNSDriver(dns_driver.DNSDriver):
+ """No-op DNS manager. Does nothing."""
+
+ def __init__(self):
+ pass
+
+ def get_domains(self):
+ return []
+
+ def create_entry(self, _name, _address, _type, _domain):
+ pass
+
+ def delete_entry(self, _name, _domain):
+ pass
+
+ def modify_address(self, _name, _address, _domain):
+ pass
+
+ def get_entries_by_address(self, _address, _domain):
+ return []
+
+ def get_entries_by_name(self, _name, _domain):
+ return []
+
+ def create_domain(self, _fqdomain):
+ pass
+
+ def delete_domain(self, _fqdomain):
+ pass
diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py
index 6b6897156..5fdb27900 100644
--- a/nova/network/nova_ipam_lib.py
+++ b/nova/network/nova_ipam_lib.py
@@ -69,7 +69,7 @@ class QuantumNovaIPAMLib(object):
return [subnet_v4, subnet_v6]
def get_routes_by_ip_block(self, context, block_id, project_id):
- """Returns the list of routes for the IP block"""
+ """Returns the list of routes for the IP block."""
return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
deleted file mode 100644
index b296715c2..000000000
--- a/nova/network/quantum/nova_ipam_lib.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Nicira Networks, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-
-from nova import db
-from nova import exception
-from nova import flags
-from nova import ipv6
-from nova.network import manager
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def get_ipam_lib(net_man):
- return QuantumNovaIPAMLib(net_man)
-
-
-class QuantumNovaIPAMLib(object):
- """Implements Quantum IP Address Management (IPAM) interface
- using the local Nova database. This implementation is inline
- with how IPAM is used by other NetworkManagers.
- """
-
- def __init__(self, net_manager):
- """Holds a reference to the "parent" network manager, used
- to take advantage of various FlatManager methods to avoid
- code duplication.
- """
- self.net_manager = net_manager
-
- def create_subnet(self, context, label, tenant_id,
- quantum_net_id, priority, cidr=None,
- gateway=None, gateway_v6=None, cidr_v6=None,
- dns1=None, dns2=None):
- """Re-use the basic FlatManager create_networks method to
- initialize the networks and fixed_ips tables in Nova DB.
-
- Also stores a few more fields in the networks table that
- are needed by Quantum but not the FlatManager.
- """
- admin_context = context.elevated()
- subnet_size = len(netaddr.IPNetwork(cidr))
- networks = manager.FlatManager._do_create_networks(self.net_manager,
- admin_context, label, cidr,
- False, 1, subnet_size, cidr_v6, gateway,
- gateway_v6, quantum_net_id, None, dns1, dns2,
- ipam=True)
- #TODO(tr3buchet): refactor passing in the ipam key so that
- # it's no longer required. The reason it exists now is because
- # nova insists on carving up IP blocks. What ends up happening is
- # we create a v4 and an identically sized v6 block. The reason
- # the quantum tests passed previosly is nothing prevented an
- # incorrect v6 address from being assigned to the wrong subnet
-
- if len(networks) != 1:
- raise Exception(_("Error creating network entry"))
-
- network = networks[0]
- net = {"project_id": tenant_id,
- "priority": priority,
- "uuid": quantum_net_id}
- db.network_update(admin_context, network['id'], net)
-
- def delete_subnets_by_net_id(self, context, net_id, project_id):
- """Deletes a network based on Quantum UUID. Uses FlatManager
- delete_network to avoid duplication.
- """
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, net_id)
- if not network:
- raise Exception(_("No network with net_id = %s") % net_id)
- manager.FlatManager.delete_network(self.net_manager,
- admin_context, None,
- network['uuid'],
- require_disassociated=False)
-
- def get_global_networks(self, admin_context):
- return db.project_get_networks(admin_context, None, False)
-
- def get_project_networks(self, admin_context):
- try:
- nets = db.network_get_all(admin_context.elevated())
- except exception.NoNetworksFound:
- return []
- # only return networks with a project_id set
- return [net for net in nets if net['project_id']]
-
- def get_project_and_global_net_ids(self, context, project_id):
- """Fetches all networks associated with this project, or
- that are "global" (i.e., have no project set).
- Returns list sorted by 'priority'.
- """
- admin_context = context.elevated()
- networks = db.project_get_networks(admin_context, project_id, False)
- networks.extend(self.get_global_networks(admin_context))
- id_priority_map = {}
- net_list = []
- for n in networks:
- net_id = n['uuid']
- net_list.append((net_id, n["project_id"]))
- id_priority_map[net_id] = n['priority']
- return sorted(net_list, key=lambda x: id_priority_map[x[0]])
-
- def allocate_fixed_ips(self, context, tenant_id, quantum_net_id,
- network_tenant_id, vif_rec):
- """Allocates a single fixed IPv4 address for a virtual interface."""
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, quantum_net_id)
- address = None
- if network['cidr']:
- instance = db.instance_get_by_uuid(context,
- vif_rec['instance_uuid'])
- address = db.fixed_ip_associate_pool(admin_context,
- network['id'],
- instance['uuid'])
- values = {'allocated': True,
- 'virtual_interface_id': vif_rec['id']}
- db.fixed_ip_update(admin_context, address, values)
- return [address]
-
- def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
- """Returns tenant_id for this network. This is only necessary
- in the melange IPAM case.
- """
- return project_id
-
- def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
- """Returns information about the IPv4 and IPv6 subnets
- associated with a Quantum Network UUID.
- """
- n = db.network_get_by_uuid(context.elevated(), net_id)
- subnet_v4 = {
- 'network_id': n['uuid'],
- 'cidr': n['cidr'],
- 'gateway': n['gateway'],
- 'broadcast': n['broadcast'],
- 'netmask': n['netmask'],
- 'version': 4,
- 'dns1': n['dns1'],
- 'dns2': n['dns2']}
- #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
- # this is probably bad as there is no way to add v6
- # dns to nova
- subnet_v6 = {
- 'network_id': n['uuid'],
- 'cidr': n['cidr_v6'],
- 'gateway': n['gateway_v6'],
- 'broadcast': None,
- 'netmask': n['netmask_v6'],
- 'version': 6,
- 'dns1': None,
- 'dns2': None}
- return [subnet_v4, subnet_v6]
-
- def get_routes_by_ip_block(self, context, block_id, project_id):
- """Returns the list of routes for the IP block"""
- return []
-
- def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
- """Returns a list of IPv4 address strings associated with
- the specified virtual interface, based on the fixed_ips table.
- """
- # TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
- vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
- fixed_ips = db.fixed_ips_by_virtual_interface(context,
- vif_rec['id'])
- return [fixed_ip['address'] for fixed_ip in fixed_ips]
-
- def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
- """Returns a list containing a single IPv6 address strings
- associated with the specified virtual interface.
- """
- admin_context = context.elevated()
- network = db.network_get_by_uuid(admin_context, net_id)
- vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
- if network['cidr_v6']:
- ip = ipv6.to_global(network['cidr_v6'],
- vif_rec['address'],
- project_id)
- return [ip]
- return []
-
- def verify_subnet_exists(self, context, tenant_id, quantum_net_id):
- """Confirms that a subnet exists that is associated with the
- specified Quantum Network UUID. Raises an exception if no
- such subnet exists.
- """
- admin_context = context.elevated()
- net = db.network_get_by_uuid(admin_context, quantum_net_id)
- return net is not None
-
- def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
- """Deallocate all fixed IPs associated with the specified
- virtual interface.
- """
- admin_context = context.elevated()
- fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
- vif_ref['id'])
- # NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
- # associated with the instance-id. This prevents us from handing it
- # out again immediately, as allocating it to a new instance before
- # a DHCP lease has timed-out is bad. Instead, the fixed-ip will
- # be disassociated with the instance-id by a call to one of two
- # methods inherited from FlatManager:
- # - if DHCP is in use, a lease expiring in dnsmasq triggers
- # a call to release_fixed_ip in the network manager, or it will
- # be timed out periodically if the lease fails.
- # - otherwise, we release the ip immediately
-
- read_deleted_context = admin_context.elevated(read_deleted='yes')
- for fixed_ip in fixed_ips:
- fixed_id = fixed_ip['id']
- floating_ips = self.net_manager.db.floating_ip_get_by_fixed_ip_id(
- admin_context,
- fixed_id)
- # disassociate floating ips related to fixed_ip
- for floating_ip in floating_ips:
- address = floating_ip['address']
- manager.FloatingIP.disassociate_floating_ip(
- self.net_manager,
- read_deleted_context,
- address,
- affect_auto_assigned=True)
- # deallocate if auto_assigned
- if floating_ip['auto_assigned']:
- manager.FloatingIP.deallocate_floating_ip(
- read_deleted_context,
- address,
- affect_auto_assigned=True)
- db.fixed_ip_update(admin_context, fixed_ip['address'],
- {'allocated': False,
- 'virtual_interface_id': None})
- if not self.net_manager.DHCP:
- db.fixed_ip_disassociate(admin_context, fixed_ip['address'])
-
- if len(fixed_ips) == 0:
- LOG.error(_('No fixed IPs to deallocate for vif %s'),
- vif_ref['id'])
-
- def get_allocated_ips(self, context, subnet_id, project_id):
- """Returns a list of (ip, vif_id) pairs"""
- admin_context = context.elevated()
- ips = db.fixed_ip_get_all(admin_context)
- allocated_ips = []
- # Get all allocated IPs that are part of this subnet
- network = db.network_get_by_uuid(admin_context, subnet_id)
- for ip in ips:
- # Skip unallocated IPs
- if not ip['allocated'] == 1:
- continue
- if ip['network_id'] == network['id']:
- vif = db.virtual_interface_get(admin_context,
- ip['virtual_interface_id'])
- allocated_ips.append((ip['address'], vif['uuid']))
- return allocated_ips
-
- def get_floating_ips_by_fixed_address(self, context, fixed_address):
- return db.floating_ip_get_by_fixed_address(context, fixed_address)
diff --git a/nova/network/quantumv2/__init__.py b/nova/network/quantumv2/__init__.py
index a5fac840c..914600ed8 100644
--- a/nova/network/quantumv2/__init__.py
+++ b/nova/network/quantumv2/__init__.py
@@ -15,15 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from quantumclient import client
from quantumclient.v2_0 import client as clientv20
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -43,8 +41,7 @@ def _get_auth_token():
return httpclient.auth_token
-def get_client(context):
- token = context.auth_token
+def _get_client(token=None):
if not token and CONF.quantum_auth_strategy:
token = _get_auth_token()
params = {
@@ -56,3 +53,11 @@ def get_client(context):
else:
params['auth_strategy'] = None
return clientv20.Client(**params)
+
+
+def get_client(context, admin=False):
+ if admin:
+ token = None
+ else:
+ token = context.auth_token
+ return _get_client(token=token)
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 8c2438669..8347ee94d 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -17,11 +17,9 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from nova.compute import api as compute_api
-from nova import config
from nova.db import base
from nova import exception
-from nova import flags
-from nova.network.api import refresh_cache
+from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.openstack.common import cfg
@@ -50,15 +48,23 @@ quantum_opts = [
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
+ # TODO(berrange) temporary hack until Quantum can pass over the
+ # name of the OVS bridge it is configured with
+ cfg.StrOpt('quantum_ovs_bridge',
+ default='br-int',
+ help='Name of Integration Bridge used by Open vSwitch'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(quantum_opts)
CONF.import_opt('default_floating_pool', 'nova.network.manager')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
+refresh_cache = network_api.refresh_cache
+update_instance_info_cache = network_api.update_instance_cache_with_nw_info
+
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
@@ -98,7 +104,15 @@ class API(base.Base):
return nets
def allocate_for_instance(self, context, instance, **kwargs):
- """Allocate all network resources for the instance."""
+ """Allocate all network resources for the instance.
+
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: QuantumV2 does not yet honour mac address limits.
+ """
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
@@ -127,7 +141,7 @@ class API(base.Base):
created_port_ids = []
for network in nets:
network_id = network['id']
- zone = 'compute:%s' % CONF.node_availability_zone
+ zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
@@ -137,7 +151,8 @@ class API(base.Base):
touched_port_ids.append(port['id'])
else:
if fixed_ips.get(network_id):
- port_req_body['port']['fixed_ip'] = fixed_ip
+ port_req_body['port']['fixed_ips'] = [{'ip_address':
+ fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
@@ -148,7 +163,7 @@ class API(base.Base):
for port_id in touched_port_ids:
port_in_server = quantum.show_port(port_id).get('port')
if not port_in_server:
- raise Exception('Port have already lost')
+ raise Exception(_('Port not found'))
port_req_body = {'port': {'device_id': None}}
quantum.update_port(port_id, port_req_body)
@@ -182,9 +197,12 @@ class API(base.Base):
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
- @refresh_cache
- def get_instance_nw_info(self, context, instance, networks=None):
- return self._get_instance_nw_info(context, instance, networks)
+ def get_instance_nw_info(self, context, instance, networks=None,
+ update_cache=True):
+ result = self._get_instance_nw_info(context, instance, networks)
+ if update_cache:
+ update_instance_info_cache(self, context, instance, result)
+ return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
@@ -287,7 +305,7 @@ class API(base.Base):
def _get_port_id_by_fixed_address(self, client,
instance, address):
- zone = 'compute:%s' % CONF.node_availability_zone
+ zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
@@ -321,10 +339,12 @@ class API(base.Base):
client.update_floatingip(fip['id'], {'floatingip': param})
def get_all(self, context):
- raise NotImplementedError()
+ client = quantumv2.get_client(context)
+ return client.list_networks()
def get(self, context, network_uuid):
- raise NotImplementedError()
+ client = quantumv2.get_client(context)
+ return client.show_network(network_uuid)
def delete(self, context, network_uuid):
raise NotImplementedError()
@@ -426,7 +446,7 @@ class API(base.Base):
return []
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
@@ -472,7 +492,7 @@ class API(base.Base):
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
- """Get floatingip from floating ip address"""
+ """Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
@@ -514,13 +534,13 @@ class API(base.Base):
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
@@ -532,7 +552,8 @@ class API(base.Base):
def _build_network_info_model(self, context, instance, networks=None):
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
- data = quantumv2.get_client(context).list_ports(**search_opts)
+ data = quantumv2.get_client(context,
+ admin=True).list_ports(**search_opts)
ports = data.get('ports', [])
if not networks:
networks = self._get_available_networks(context,
@@ -562,9 +583,24 @@ class API(base.Base):
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
+ bridge = None
+ vif_type = port.get('binding:vif_type')
+ # TODO(berrange) Quantum should pass the bridge name
+ # in another binding metadata field
+ if vif_type == network_model.VIF_TYPE_OVS:
+ bridge = CONF.quantum_ovs_bridge
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
+ bridge = "brq" + port['network_id']
+
+ if bridge is not None:
+ bridge = bridge[:network_model.NIC_NAME_LEN]
+
+ devname = "tap" + port['id']
+ devname = devname[:network_model.NIC_NAME_LEN]
+
network = network_model.Network(
id=port['network_id'],
- bridge='', # Quantum ignores this field
+ bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
@@ -573,7 +609,9 @@ class API(base.Base):
nw_info.append(network_model.VIF(
id=port['id'],
address=port['mac_address'],
- network=network))
+ network=network,
+ type=port.get('binding:vif_type'),
+ devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index eba0aae52..2f52add57 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -18,13 +18,19 @@
Client side of the network RPC API.
"""
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import proxy as rpc_proxy
-CONF = config.CONF
+rpcapi_opts = [
+ cfg.StrOpt('network_topic',
+ default='network',
+ help='the topic network nodes listen on'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
class NetworkAPI(rpc_proxy.RpcProxy):
@@ -35,6 +41,10 @@ class NetworkAPI(rpc_proxy.RpcProxy):
1.0 - Initial version.
1.1 - Adds migrate_instance_[start|finish]
1.2 - Make migrate_instance_[start|finish] a little more flexible
+ 1.3 - Adds fanout cast update_dns for multi_host networks
+        1.4 - Adds get_backdoor_port()
+ 1.5 - Adds associate
+ 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
'''
#
@@ -104,6 +114,11 @@ class NetworkAPI(rpc_proxy.RpcProxy):
'get_instance_id_by_floating_address',
address=address))
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ topic=rpc.queue_get_for(ctxt, self.topic, host),
+ version='1.4')
+
def get_vifs_by_instance(self, ctxt, instance_id):
# NOTE(vish): When the db calls are converted to store network
# data by instance_uuid, this should pass uuid instead.
@@ -156,6 +171,11 @@ class NetworkAPI(rpc_proxy.RpcProxy):
return self.call(ctxt, self.make_msg('add_network_to_project',
project_id=project_id, network_uuid=network_uuid))
+ def associate(self, ctxt, network_uuid, associations):
+ return self.call(ctxt, self.make_msg('associate',
+ network_uuid=network_uuid, associations=associations),
+ self.topic, version="1.5")
+
def get_instance_nw_info(self, ctxt, instance_id, instance_uuid,
rxtx_factor, host, project_id):
return self.call(ctxt, self.make_msg('get_instance_nw_info',
@@ -237,25 +257,33 @@ class NetworkAPI(rpc_proxy.RpcProxy):
address=address, host=host),
topic=rpc.queue_get_for(ctxt, self.topic, host))
+ def update_dns(self, ctxt, network_ids):
+ return self.fanout_cast(ctxt, self.make_msg('update_dns',
+ network_ids=network_ids), version='1.3')
+
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _associate_floating_ip(self, ctxt, floating_address, fixed_address,
- interface, host):
+ interface, host, instance_uuid=None):
return self.call(ctxt, self.make_msg('_associate_floating_ip',
floating_address=floating_address, fixed_address=fixed_address,
- interface=interface),
- topic=rpc.queue_get_for(ctxt, self.topic, host))
+ interface=interface, instance_uuid=instance_uuid),
+ topic=rpc.queue_get_for(ctxt, self.topic, host),
+ version='1.6')
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
- def _disassociate_floating_ip(self, ctxt, address, interface, host):
+ def _disassociate_floating_ip(self, ctxt, address, interface, host,
+ instance_uuid=None):
return self.call(ctxt, self.make_msg('_disassociate_floating_ip',
- address=address, interface=interface),
- topic=rpc.queue_get_for(ctxt, self.topic, host))
+ address=address, interface=interface,
+ instance_uuid=instance_uuid),
+ topic=rpc.queue_get_for(ctxt, self.topic, host),
+ version='1.6')
def lease_fixed_ip(self, ctxt, address, host):
self.cast(ctxt, self.make_msg('lease_fixed_ip', address=address),
@@ -268,10 +296,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
- if host is not None:
- topic = rpc.queue_get_for(ctxt, self.topic, host)
- else:
- topic = self.topic
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
return self.call(ctxt, self.make_msg(
'migrate_instance_start',
instance_uuid=instance_uuid,
@@ -286,10 +311,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
- if host is not None:
- topic = rpc.queue_get_for(ctxt, self.topic, host)
- else:
- topic = self.topic
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
return self.call(ctxt, self.make_msg(
'migrate_instance_finish',
instance_uuid=instance_uuid,
diff --git a/nova/notifications.py b/nova/notifications.py
index 7153933ee..f399ac55d 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -19,11 +19,10 @@
the system.
"""
-from nova import config
import nova.context
from nova import db
from nova import exception
-from nova import flags
+from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.openstack.common import cfg
@@ -51,7 +50,7 @@ notify_api_faults = cfg.BoolOpt('notify_api_faults', default=False,
'in the API service.')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(notify_state_opt)
CONF.register_opt(notify_any_opt)
CONF.register_opt(notify_api_faults)
@@ -228,7 +227,7 @@ def bandwidth_usage(instance_ref, audit_start,
nw_info = network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
- LOG.exception('Failed to get nw_info', instance=instance_ref)
+ LOG.exception(_('Failed to get nw_info'), instance=instance_ref)
if ignore_missing_network_data:
return
raise
@@ -279,7 +278,7 @@ def info_from_instance(context, instance_ref, network_info,
def null_safe_str(s):
return str(s) if s else ''
- image_ref_url = utils.generate_image_url(instance_ref['image_ref'])
+ image_ref_url = glance.generate_image_url(instance_ref['image_ref'])
instance_type_name = instance_ref.get('instance_type', {}).get('name', '')
@@ -300,6 +299,7 @@ def info_from_instance(context, instance_ref, network_info,
instance_id=instance_ref['uuid'],
display_name=instance_ref['display_name'],
reservation_id=instance_ref['reservation_id'],
+ hostname=instance_ref['hostname'],
# Type properties
instance_type=instance_type_name,
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index 5b4ee916c..6a6d6bf32 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -44,17 +44,16 @@ import urllib
import routes
import webob
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
+from nova import paths
from nova import utils
from nova import wsgi
s3_opts = [
cfg.StrOpt('buckets_path',
- default='$state_path/buckets',
+ default=paths.state_path_def('buckets'),
help='path to s3 buckets'),
cfg.StrOpt('s3_listen',
default="0.0.0.0",
@@ -64,7 +63,7 @@ s3_opts = [
help='port for s3 api to listen'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(s3_opts)
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index 36e5e0ab0..ad1f2a8a6 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -205,27 +205,11 @@ Option values may reference other values using PEP 292 string substitution::
Note that interpolation can be avoided by using '$$'.
-For command line utilities that dispatch to other command line utilities, the
-disable_interspersed_args() method is available. If this this method is called,
-then parsing e.g.::
-
- script --verbose cmd --debug /tmp/mything
-
-will no longer return::
-
- ['cmd', '/tmp/mything']
-
-as the leftover arguments, but will instead return::
-
- ['cmd', '--debug', '/tmp/mything']
-
-i.e. argument parsing is stopped at the first non-option argument.
-
Options may be declared as required so that an error is raised if the user
does not supply a value for the option.
Options may be declared as secret so that their values are not leaked into
-log files:
+log files::
opts = [
cfg.StrOpt('s3_store_access_key', secret=True),
@@ -234,28 +218,50 @@ log files:
]
This module also contains a global instance of the CommonConfigOpts class
-in order to support a common usage pattern in OpenStack:
+in order to support a common usage pattern in OpenStack::
+
+ from nova.openstack.common import cfg
+
+ opts = [
+ cfg.StrOpt('bind_host', default='0.0.0.0'),
+ cfg.IntOpt('bind_port', default=9292),
+ ]
+
+ CONF = cfg.CONF
+ CONF.register_opts(opts)
+
+ def start(server, app):
+ server.start(app, CONF.bind_port, CONF.bind_host)
- from openstack.common import cfg
+Positional command line arguments are supported via a 'positional' Opt
+constructor argument::
- opts = [
- cfg.StrOpt('bind_host' default='0.0.0.0'),
- cfg.IntOpt('bind_port', default=9292),
- ]
+ >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True))
+ True
+ >>> CONF(['a', 'b'])
+ >>> CONF.bar
+ ['a', 'b']
- CONF = cfg.CONF
- CONF.register_opts(opts)
+It is also possible to use argparse "sub-parsers" to parse additional
+command line arguments using the SubCommandOpt class:
- def start(server, app):
- server.start(app, CONF.bind_port, CONF.bind_host)
+ >>> def add_parsers(subparsers):
+ ... list_action = subparsers.add_parser('list')
+ ... list_action.add_argument('id')
+ ...
+ >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
+ True
+ >>> CONF(['list', '10'])
+ >>> CONF.action.name, CONF.action.id
+ ('list', '10')
"""
+import argparse
import collections
import copy
import functools
import glob
-import optparse
import os
import string
import sys
@@ -474,6 +480,13 @@ def _is_opt_registered(opts, opt):
return False
+def set_defaults(opts, **kwargs):
+    """Override the default of every opt whose dest is named in kwargs."""
+    for opt in opts:
+        if opt.dest in kwargs:
+            opt.default = kwargs[opt.dest]
+
+
class Opt(object):
"""Base class for all configuration options.
@@ -489,6 +502,8 @@ class Opt(object):
a single character CLI option name
default:
the default value of the option
+ positional:
+ True if the option is a positional CLI argument
metavar:
the name shown as the argument to a CLI option in --help output
help:
@@ -497,8 +512,8 @@ class Opt(object):
multi = False
def __init__(self, name, dest=None, short=None, default=None,
- metavar=None, help=None, secret=False, required=False,
- deprecated_name=None):
+ positional=False, metavar=None, help=None,
+ secret=False, required=False, deprecated_name=None):
"""Construct an Opt object.
The only required parameter is the option's name. However, it is
@@ -508,6 +523,7 @@ class Opt(object):
:param dest: the name of the corresponding ConfigOpts property
:param short: a single character CLI option name
:param default: the default value of the option
+ :param positional: True if the option is a positional CLI argument
:param metavar: the option argument to show in --help
:param help: an explanation of how the option is used
:param secret: true iff the value should be obfuscated in log output
@@ -521,6 +537,7 @@ class Opt(object):
self.dest = dest
self.short = short
self.default = default
+ self.positional = positional
self.metavar = metavar
self.help = help
self.secret = secret
@@ -561,64 +578,73 @@ class Opt(object):
:param parser: the CLI option parser
:param group: an optional OptGroup object
"""
- container = self._get_optparse_container(parser, group)
- kwargs = self._get_optparse_kwargs(group)
- prefix = self._get_optparse_prefix('', group)
- self._add_to_optparse(container, self.name, self.short, kwargs, prefix,
- self.deprecated_name)
+ container = self._get_argparse_container(parser, group)
+ kwargs = self._get_argparse_kwargs(group)
+ prefix = self._get_argparse_prefix('', group)
+ self._add_to_argparse(container, self.name, self.short, kwargs, prefix,
+ self.positional, self.deprecated_name)
- def _add_to_optparse(self, container, name, short, kwargs, prefix='',
- deprecated_name=None):
- """Add an option to an optparse parser or group.
+ def _add_to_argparse(self, container, name, short, kwargs, prefix='',
+ positional=False, deprecated_name=None):
+ """Add an option to an argparse parser or group.
- :param container: an optparse.OptionContainer object
+ :param container: an argparse._ArgumentGroup object
:param name: the opt name
:param short: the short opt name
- :param kwargs: the keyword arguments for add_option()
+ :param kwargs: the keyword arguments for add_argument()
:param prefix: an optional prefix to prepend to the opt name
+        :param positional: whether the option is a positional CLI argument
:raises: DuplicateOptError if a naming confict is detected
"""
- args = ['--' + prefix + name]
+ def hyphen(arg):
+ return arg if not positional else ''
+
+ args = [hyphen('--') + prefix + name]
if short:
- args += ['-' + short]
+ args.append(hyphen('-') + short)
if deprecated_name:
- args += ['--' + prefix + deprecated_name]
- for a in args:
- if container.has_option(a):
- raise DuplicateOptError(a)
- container.add_option(*args, **kwargs)
+ args.append(hyphen('--') + prefix + deprecated_name)
- def _get_optparse_container(self, parser, group):
- """Returns an optparse.OptionContainer.
+ try:
+ container.add_argument(*args, **kwargs)
+ except argparse.ArgumentError as e:
+ raise DuplicateOptError(e)
+
+ def _get_argparse_container(self, parser, group):
+ """Returns an argparse._ArgumentGroup.
- :param parser: an optparse.OptionParser
+ :param parser: an argparse.ArgumentParser
:param group: an (optional) OptGroup object
- :returns: an optparse.OptionGroup if a group is given, else the parser
+ :returns: an argparse._ArgumentGroup if group is given, else parser
"""
if group is not None:
- return group._get_optparse_group(parser)
+ return group._get_argparse_group(parser)
else:
return parser
- def _get_optparse_kwargs(self, group, **kwargs):
- """Build a dict of keyword arguments for optparse's add_option().
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Build a dict of keyword arguments for argparse's add_argument().
Most opt types extend this method to customize the behaviour of the
- options added to optparse.
+ options added to argparse.
:param group: an optional group
:param kwargs: optional keyword arguments to add to
:returns: a dict of keyword arguments
"""
- dest = self.dest
- if group is not None:
- dest = group.name + '_' + dest
- kwargs.update({'dest': dest,
+ if not self.positional:
+ dest = self.dest
+ if group is not None:
+ dest = group.name + '_' + dest
+ kwargs['dest'] = dest
+ else:
+ kwargs['nargs'] = '?'
+ kwargs.update({'default': None,
'metavar': self.metavar,
'help': self.help, })
return kwargs
- def _get_optparse_prefix(self, prefix, group):
+ def _get_argparse_prefix(self, prefix, group):
"""Build a prefix for the CLI option name, if required.
CLI options in a group are prefixed with the group's name in order
@@ -656,6 +682,11 @@ class BoolOpt(Opt):
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
+ def __init__(self, *args, **kwargs):
+ if 'positional' in kwargs:
+ raise ValueError('positional boolean args not supported')
+ super(BoolOpt, self).__init__(*args, **kwargs)
+
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a boolean from ConfigParser."""
def convert_bool(v):
@@ -671,21 +702,32 @@ class BoolOpt(Opt):
def _add_to_cli(self, parser, group=None):
"""Extends the base class method to add the --nooptname option."""
super(BoolOpt, self)._add_to_cli(parser, group)
- self._add_inverse_to_optparse(parser, group)
+ self._add_inverse_to_argparse(parser, group)
- def _add_inverse_to_optparse(self, parser, group):
+ def _add_inverse_to_argparse(self, parser, group):
"""Add the --nooptname option to the option parser."""
- container = self._get_optparse_container(parser, group)
- kwargs = self._get_optparse_kwargs(group, action='store_false')
- prefix = self._get_optparse_prefix('no', group)
+ container = self._get_argparse_container(parser, group)
+ kwargs = self._get_argparse_kwargs(group, action='store_false')
+ prefix = self._get_argparse_prefix('no', group)
kwargs["help"] = "The inverse of --" + self.name
- self._add_to_optparse(container, self.name, None, kwargs, prefix,
- self.deprecated_name)
+ self._add_to_argparse(container, self.name, None, kwargs, prefix,
+ self.positional, self.deprecated_name)
+
+ def _get_argparse_kwargs(self, group, action='store_true', **kwargs):
+ """Extends the base argparse keyword dict for boolean options."""
+
+ kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs)
- def _get_optparse_kwargs(self, group, action='store_true', **kwargs):
- """Extends the base optparse keyword dict for boolean options."""
- return super(BoolOpt,
- self)._get_optparse_kwargs(group, action=action, **kwargs)
+ # metavar has no effect for BoolOpt
+ if 'metavar' in kwargs:
+ del kwargs['metavar']
+
+ if action != 'store_true':
+ action = 'store_false'
+
+ kwargs['action'] = action
+
+ return kwargs
class IntOpt(Opt):
@@ -697,10 +739,10 @@ class IntOpt(Opt):
return [int(v) for v in self._cparser_get_with_deprecated(cparser,
section)]
- def _get_optparse_kwargs(self, group, **kwargs):
- """Extends the base optparse keyword dict for integer options."""
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for integer options."""
return super(IntOpt,
- self)._get_optparse_kwargs(group, type='int', **kwargs)
+ self)._get_argparse_kwargs(group, type=int, **kwargs)
class FloatOpt(Opt):
@@ -712,10 +754,10 @@ class FloatOpt(Opt):
return [float(v) for v in
self._cparser_get_with_deprecated(cparser, section)]
- def _get_optparse_kwargs(self, group, **kwargs):
- """Extends the base optparse keyword dict for float options."""
- return super(FloatOpt,
- self)._get_optparse_kwargs(group, type='float', **kwargs)
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for float options."""
+ return super(FloatOpt, self)._get_argparse_kwargs(group,
+ type=float, **kwargs)
class ListOpt(Opt):
@@ -725,23 +767,26 @@ class ListOpt(Opt):
is a list containing these strings.
"""
+ class _StoreListAction(argparse.Action):
+ """
+ An argparse action for parsing an option value into a list.
+ """
+ def __call__(self, parser, namespace, values, option_string=None):
+ if values is not None:
+ values = [a.strip() for a in values.split(',')]
+ setattr(namespace, self.dest, values)
+
def _get_from_config_parser(self, cparser, section):
"""Retrieve the opt value as a list from ConfigParser."""
- return [v.split(',') for v in
+ return [[a.strip() for a in v.split(',')] for v in
self._cparser_get_with_deprecated(cparser, section)]
- def _get_optparse_kwargs(self, group, **kwargs):
- """Extends the base optparse keyword dict for list options."""
- return super(ListOpt,
- self)._get_optparse_kwargs(group,
- type='string',
- action='callback',
- callback=self._parse_list,
- **kwargs)
-
- def _parse_list(self, option, opt, value, parser):
- """An optparse callback for parsing an option value into a list."""
- setattr(parser.values, self.dest, value.split(','))
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for list options."""
+ return Opt._get_argparse_kwargs(self,
+ group,
+ action=ListOpt._StoreListAction,
+ **kwargs)
class MultiStrOpt(Opt):
@@ -752,10 +797,14 @@ class MultiStrOpt(Opt):
"""
multi = True
- def _get_optparse_kwargs(self, group, **kwargs):
- """Extends the base optparse keyword dict for multi str options."""
- return super(MultiStrOpt,
- self)._get_optparse_kwargs(group, action='append')
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for multi str options."""
+ kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group)
+ if not self.positional:
+ kwargs['action'] = 'append'
+ else:
+ kwargs['nargs'] = '*'
+ return kwargs
def _cparser_get_with_deprecated(self, cparser, section):
"""If cannot find option as dest try deprecated_name alias."""
@@ -765,6 +814,57 @@ class MultiStrOpt(Opt):
return cparser.get(section, [self.dest], multi=True)
+class SubCommandOpt(Opt):
+
+ """
+ Sub-command options allow argparse sub-parsers to be used to parse
+ additional command line arguments.
+
+ The handler argument to the SubCommandOpt contructor is a callable
+ which is supplied an argparse subparsers object. Use this handler
+ callable to add sub-parsers.
+
+ The opt value is SubCommandAttr object with the name of the chosen
+ sub-parser stored in the 'name' attribute and the values of other
+ sub-parser arguments available as additional attributes.
+ """
+
+ def __init__(self, name, dest=None, handler=None,
+ title=None, description=None, help=None):
+ """Construct an sub-command parsing option.
+
+ This behaves similarly to other Opt sub-classes but adds a
+ 'handler' argument. The handler is a callable which is supplied
+ an subparsers object when invoked. The add_parser() method on
+ this subparsers object can be used to register parsers for
+ sub-commands.
+
+ :param name: the option's name
+ :param dest: the name of the corresponding ConfigOpts property
+ :param title: title of the sub-commands group in help output
+ :param description: description of the group in help output
+ :param help: a help string giving an overview of available sub-commands
+ """
+ super(SubCommandOpt, self).__init__(name, dest=dest, help=help)
+ self.handler = handler
+ self.title = title
+ self.description = description
+
+ def _add_to_cli(self, parser, group=None):
+ """Add argparse sub-parsers and invoke the handler method."""
+ dest = self.dest
+ if group is not None:
+ dest = group.name + '_' + dest
+
+ subparsers = parser.add_subparsers(dest=dest,
+ title=self.title,
+ description=self.description,
+ help=self.help)
+
+ if not self.handler is None:
+ self.handler(subparsers)
+
+
class OptGroup(object):
"""
@@ -800,19 +900,20 @@ class OptGroup(object):
self.help = help
self._opts = {} # dict of dicts of (opt:, override:, default:)
- self._optparse_group = None
+ self._argparse_group = None
- def _register_opt(self, opt):
+ def _register_opt(self, opt, cli=False):
"""Add an opt to this group.
:param opt: an Opt object
+ :param cli: whether this is a CLI option
:returns: False if previously registered, True otherwise
:raises: DuplicateOptError if a naming conflict is detected
"""
if _is_opt_registered(self._opts, opt):
return False
- self._opts[opt.dest] = {'opt': opt}
+ self._opts[opt.dest] = {'opt': opt, 'cli': cli}
return True
@@ -824,16 +925,16 @@ class OptGroup(object):
if opt.dest in self._opts:
del self._opts[opt.dest]
- def _get_optparse_group(self, parser):
- """Build an optparse.OptionGroup for this group."""
- if self._optparse_group is None:
- self._optparse_group = optparse.OptionGroup(parser, self.title,
- self.help)
- return self._optparse_group
+ def _get_argparse_group(self, parser):
+ if self._argparse_group is None:
+ """Build an argparse._ArgumentGroup for this group."""
+ self._argparse_group = parser.add_argument_group(self.title,
+ self.help)
+ return self._argparse_group
def _clear(self):
"""Clear this group's option parsing state."""
- self._optparse_group = None
+ self._argparse_group = None
class ParseError(iniparser.ParseError):
@@ -928,26 +1029,31 @@ class ConfigOpts(collections.Mapping):
self._groups = {}
self._args = None
+
self._oparser = None
self._cparser = None
self._cli_values = {}
self.__cache = {}
self._config_opts = []
- self._disable_interspersed_args = False
- def _setup(self, project, prog, version, usage, default_config_files):
- """Initialize a ConfigOpts object for option parsing."""
+ def _pre_setup(self, project, prog, version, usage, default_config_files):
+ """Initialize a ConfigCliParser object for option parsing."""
+
if prog is None:
prog = os.path.basename(sys.argv[0])
if default_config_files is None:
default_config_files = find_config_files(project, prog)
- self._oparser = optparse.OptionParser(prog=prog,
- version=version,
- usage=usage)
- if self._disable_interspersed_args:
- self._oparser.disable_interspersed_args()
+ self._oparser = argparse.ArgumentParser(prog=prog, usage=usage)
+ self._oparser.add_argument('--version',
+ action='version',
+ version=version)
+
+ return prog, default_config_files
+
+ def _setup(self, project, prog, version, usage, default_config_files):
+ """Initialize a ConfigOpts object for option parsing."""
self._config_opts = [
MultiStrOpt('config-file',
@@ -1017,18 +1123,23 @@ class ConfigOpts(collections.Mapping):
:raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
RequiredOptError, DuplicateOptError
"""
+
self.clear()
+ prog, default_config_files = self._pre_setup(project,
+ prog,
+ version,
+ usage,
+ default_config_files)
+
self._setup(project, prog, version, usage, default_config_files)
- self._cli_values, leftovers = self._parse_cli_opts(args)
+ self._cli_values = self._parse_cli_opts(args)
self._parse_config_files()
self._check_required_opts()
- return leftovers
-
def __getattr__(self, name):
"""Look up an option value and perform string substitution.
@@ -1062,17 +1173,21 @@ class ConfigOpts(collections.Mapping):
@__clear_cache
def clear(self):
- """Clear the state of the object to before it was called."""
+ """Clear the state of the object to before it was called.
+
+ Any subparsers added using the add_cli_subparsers() will also be
+ removed as a side-effect of this method.
+ """
self._args = None
self._cli_values.clear()
- self._oparser = None
+ self._oparser = argparse.ArgumentParser()
self._cparser = None
self.unregister_opts(self._config_opts)
for group in self._groups.values():
group._clear()
@__clear_cache
- def register_opt(self, opt, group=None):
+ def register_opt(self, opt, group=None, cli=False):
"""Register an option schema.
Registering an option schema makes any option value which is previously
@@ -1080,17 +1195,19 @@ class ConfigOpts(collections.Mapping):
as an attribute of this object.
:param opt: an instance of an Opt sub-class
+ :param cli: whether this is a CLI option
:param group: an optional OptGroup object or group name
:return: False if the opt was already register, True otherwise
:raises: DuplicateOptError
"""
if group is not None:
- return self._get_group(group, autocreate=True)._register_opt(opt)
+ group = self._get_group(group, autocreate=True)
+ return group._register_opt(opt, cli)
if _is_opt_registered(self._opts, opt):
return False
- self._opts[opt.dest] = {'opt': opt}
+ self._opts[opt.dest] = {'opt': opt, 'cli': cli}
return True
@@ -1116,7 +1233,7 @@ class ConfigOpts(collections.Mapping):
if self._args is not None:
raise ArgsAlreadyParsedError("cannot register CLI option")
- return self.register_opt(opt, group, clear_cache=False)
+ return self.register_opt(opt, group, cli=True, clear_cache=False)
@__clear_cache
def register_cli_opts(self, opts, group=None):
@@ -1243,10 +1360,11 @@ class ConfigOpts(collections.Mapping):
for info in group._opts.values():
yield info, group
- def _all_opts(self):
- """A generator function for iteration opts."""
+ def _all_cli_opts(self):
+ """A generator function for iterating CLI opts."""
for info, group in self._all_opt_infos():
- yield info['opt'], group
+ if info['cli']:
+ yield info['opt'], group
def _unset_defaults_and_overrides(self):
"""Unset any default or override on all options."""
@@ -1254,31 +1372,6 @@ class ConfigOpts(collections.Mapping):
info.pop('default', None)
info.pop('override', None)
- def disable_interspersed_args(self):
- """Set parsing to stop on the first non-option.
-
- If this this method is called, then parsing e.g.
-
- script --verbose cmd --debug /tmp/mything
-
- will no longer return:
-
- ['cmd', '/tmp/mything']
-
- as the leftover arguments, but will instead return:
-
- ['cmd', '--debug', '/tmp/mything']
-
- i.e. argument parsing is stopped at the first non-option argument.
- """
- self._disable_interspersed_args = True
-
- def enable_interspersed_args(self):
- """Set parsing to not stop on the first non-option.
-
- This it the default behaviour."""
- self._disable_interspersed_args = False
-
def find_file(self, name):
"""Locate a file located alongside the config files.
@@ -1377,6 +1470,9 @@ class ConfigOpts(collections.Mapping):
info = self._get_opt_info(name, group)
opt = info['opt']
+ if isinstance(opt, SubCommandOpt):
+ return self.SubCommandAttr(self, group, opt.dest)
+
if 'override' in info:
return info['override']
@@ -1401,6 +1497,10 @@ class ConfigOpts(collections.Mapping):
if not opt.multi:
return value
+ # argparse ignores default=None for nargs='*'
+ if opt.positional and not value:
+ value = opt.default
+
return value + values
if values:
@@ -1507,7 +1607,7 @@ class ConfigOpts(collections.Mapping):
if ('default' in info or 'override' in info):
continue
- if self._get(opt.name, group) is None:
+ if self._get(opt.dest, group) is None:
raise RequiredOptError(opt.name, group)
def _parse_cli_opts(self, args):
@@ -1523,12 +1623,10 @@ class ConfigOpts(collections.Mapping):
"""
self._args = args
- for opt, group in self._all_opts():
+ for opt, group in self._all_cli_opts():
opt._add_to_cli(self._oparser, group)
- values, leftovers = self._oparser.parse_args(args)
-
- return vars(values), leftovers
+ return vars(self._oparser.parse_args(args))
class GroupAttr(collections.Mapping):
@@ -1543,12 +1641,12 @@ class ConfigOpts(collections.Mapping):
:param conf: a ConfigOpts object
:param group: an OptGroup object
"""
- self.conf = conf
- self.group = group
+ self._conf = conf
+ self._group = group
def __getattr__(self, name):
"""Look up an option value and perform template substitution."""
- return self.conf._get(name, self.group)
+ return self._conf._get(name, self._group)
def __getitem__(self, key):
"""Look up an option value and perform string substitution."""
@@ -1556,16 +1654,50 @@ class ConfigOpts(collections.Mapping):
def __contains__(self, key):
"""Return True if key is the name of a registered opt or group."""
- return key in self.group._opts
+ return key in self._group._opts
def __iter__(self):
"""Iterate over all registered opt and group names."""
- for key in self.group._opts.keys():
+ for key in self._group._opts.keys():
yield key
def __len__(self):
"""Return the number of options and option groups."""
- return len(self.group._opts)
+ return len(self._group._opts)
+
+ class SubCommandAttr(object):
+
+ """
+ A helper class representing the name and arguments of an argparse
+ sub-parser.
+ """
+
+ def __init__(self, conf, group, dest):
+ """Construct a SubCommandAttr object.
+
+ :param conf: a ConfigOpts object
+ :param group: an OptGroup object
+ :param dest: the name of the sub-parser
+ """
+ self._conf = conf
+ self._group = group
+ self._dest = dest
+
+ def __getattr__(self, name):
+ """Look up a sub-parser name or argument value."""
+ if name == 'name':
+ name = self._dest
+ if self._group is not None:
+ name = self._group.name + '_' + name
+ return self._conf._cli_values[name]
+
+ if name in self._conf:
+ raise DuplicateOptError(name)
+
+ try:
+ return self._conf._cli_values[name]
+ except KeyError:
+ raise NoSuchOptError(name)
class StrSubWrapper(object):
@@ -1623,19 +1755,21 @@ class CommonConfigOpts(ConfigOpts):
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
- 'Default: %default'),
+ 'Default: %(default)s'),
StrOpt('log-date-format',
default=DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
- help='Format string for %(asctime)s in log records. '
- 'Default: %default'),
+ help='Format string for %%(asctime)s in log records. '
+ 'Default: %(default)s'),
StrOpt('log-file',
metavar='PATH',
+ deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If not set, logging will go to stdout.'),
StrOpt('log-dir',
+ deprecated_name='logdir',
help='(Optional) The directory to keep log files in '
- '(will be prepended to --logfile)'),
+ '(will be prepended to --log-file)'),
BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
diff --git a/nova/openstack/common/cliutils.py b/nova/openstack/common/cliutils.py
new file mode 100644
index 000000000..8f4dc44dd
--- /dev/null
+++ b/nova/openstack/common/cliutils.py
@@ -0,0 +1,66 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import inspect
+import string
+
+
+class MissingArgs(Exception):
+
+ def __init__(self, missing):
+ self.missing = missing
+
+ def __str__(self):
+ if len(self.missing) == 1:
+ return ("An argument is missing: %(missing)s" %
+ dict(missing=self.missing[0]))
+ else:
+ return ("%(num)d arguments are missing: %(missing)s" %
+ dict(num=len(self.missing),
+ missing=string.join(self.missing, ', ')))
+
+
+def validate_args(fn, *args, **kwargs):
+ """Check that the supplied args are sufficient for calling a function.
+
+ >>> validate_args(lambda a: None)
+ Traceback (most recent call last):
+ ...
+ MissingArgs: An argument is missing: a
+ >>> validate_args(lambda a, b, c, d: None, 0, c=1)
+ Traceback (most recent call last):
+ ...
+ MissingArgs: 2 arguments are missing: b, d
+
+ :param fn: the function to check
+ :param arg: the positional arguments supplied
+ :param kwargs: the keyword arguments supplied
+ """
+ argspec = inspect.getargspec(fn)
+
+ num_defaults = len(argspec.defaults or [])
+ required_args = argspec.args[:len(argspec.args) - num_defaults]
+
+ def isbound(method):
+ return getattr(method, 'im_self', None) is not None
+
+ if isbound(fn):
+ required_args.pop(0)
+
+ missing = [arg for arg in required_args if arg not in kwargs]
+ missing = missing[len(args):]
+ if missing:
+ raise MissingArgs(missing)
diff --git a/nova/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index 4620d76ac..f18e84f6d 100644
--- a/nova/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 OpenStack, LLC.
+# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
@@ -25,46 +25,43 @@ import eventlet
import eventlet.backdoor
import greenlet
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
- ]
+]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
-def dont_use_this():
+def _dont_use_this():
print "Don't use this, just disconnect instead"
-def find_objects(t):
+def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
-def print_greenthreads():
+def _print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
-backdoor_locals = {
- 'exit': dont_use_this, # So we don't exit the entire process
- 'quit': dont_use_this, # So we don't exit the entire process
- 'fo': find_objects,
- 'pgt': print_greenthreads,
-}
-
-
def initialize_if_enabled():
+ backdoor_locals = {
+ 'exit': _dont_use_this, # So we don't exit the entire process
+ 'quit': _dont_use_this, # So we don't exit the entire process
+ 'fo': _find_objects,
+ 'pgt': _print_greenthreads,
+ }
+
if CONF.backdoor_port is None:
- return
+ return None
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
@@ -76,6 +73,8 @@ def initialize_if_enabled():
pprint.pprint(val)
sys.displayhook = displayhook
- eventlet.spawn(eventlet.backdoor.backdoor_server,
- eventlet.listen(('localhost', CONF.backdoor_port)),
- locals=backdoor_locals)
+ sock = eventlet.listen(('localhost', CONF.backdoor_port))
+ port = sock.getsockname()[1]
+ eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+ locals=backdoor_locals)
+ return port
diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py
index 235350cc4..d52309e62 100644
--- a/nova/openstack/common/gettextutils.py
+++ b/nova/openstack/common/gettextutils.py
@@ -20,7 +20,7 @@ gettext for openstack-common modules.
Usual usage in an openstack.common module:
- from openstack.common.gettextutils import _
+ from nova.openstack.common.gettextutils import _
"""
import gettext
diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py
index 2840ce6f7..ba390dc69 100644
--- a/nova/openstack/common/lockutils.py
+++ b/nova/openstack/common/lockutils.py
@@ -24,7 +24,6 @@ import tempfile
import time
import weakref
-from eventlet import greenthread
from eventlet import semaphore
from nova.openstack.common import cfg
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 67a06a7af..6e25bb597 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -49,19 +49,20 @@ from nova.openstack.common import notifier
log_opts = [
cfg.StrOpt('logging_context_format_string',
- default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
- '%(user_id)s %(project_id)s] %(instance)s'
+ default='%(asctime)s.%(msecs)d %(levelname)s %(name)s '
+ '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
- default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
- ' %(instance)s%(message)s',
+ default='%(asctime)s.%(msecs)d %(process)d %(levelname)s '
+ '%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
- default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
+ default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s '
+ '%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
@@ -174,7 +175,7 @@ class ContextAdapter(logging.LoggerAdapter):
self.log(logging.AUDIT, msg, *args, **kwargs)
def deprecated(self, msg, *args, **kwargs):
- stdmsg = _("Deprecated Config: %s") % msg
+ stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
@@ -289,6 +290,12 @@ def setup(product_name):
_setup_logging_from_conf(product_name)
+def set_defaults(logging_context_format_string):
+ cfg.set_defaults(log_opts,
+ logging_context_format_string=
+ logging_context_format_string)
+
+
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
diff --git a/nova/openstack/common/notifier/api.py b/nova/openstack/common/notifier/api.py
index 76b725c0a..0ec55fbf0 100644
--- a/nova/openstack/common/notifier/api.py
+++ b/nova/openstack/common/notifier/api.py
@@ -137,10 +137,11 @@ def notify(context, publisher_id, event_type, priority, payload):
for driver in _get_drivers():
try:
driver.notify(context, msg)
- except Exception, e:
+ except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
- "Payload=%(payload)s") % locals())
+ "Payload=%(payload)s")
+ % dict(e=e, payload=payload))
_drivers = None
@@ -166,7 +167,7 @@ def add_driver(notification_driver):
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
- except ImportError as e:
+ except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
diff --git a/nova/openstack/common/notifier/rabbit_notifier.py b/nova/openstack/common/notifier/rabbit_notifier.py
index c7b3f54fe..11067fb0a 100644
--- a/nova/openstack/common/notifier/rabbit_notifier.py
+++ b/nova/openstack/common/notifier/rabbit_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,33 +14,16 @@
# under the License.
-from nova.openstack.common import cfg
-from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
+from nova.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
-notification_topic_opt = cfg.ListOpt(
- 'notification_topics', default=['notifications', ],
- help='AMQP topic used for openstack notifications')
-
-CONF = cfg.CONF
-CONF.register_opt(notification_topic_opt)
-
def notify(context, message):
- """Sends a notification to the RabbitMQ"""
- if not context:
- context = req_context.get_admin_context()
- priority = message.get('priority',
- CONF.default_notification_level)
- priority = priority.lower()
- for topic in CONF.notification_topics:
- topic = '%s.%s' % (topic, priority)
- try:
- rpc.notify(context, topic, message)
- except Exception, e:
- LOG.exception(_("Could not send notification to %(topic)s. "
- "Payload=%(message)s"), locals())
+ """Deprecated in Grizzly. Please use rpc_notifier instead."""
+
+ LOG.deprecated(_("The rabbit_notifier is now deprecated."
+ " Please use rpc_notifier instead."))
+ rpc_notifier.notify(context, message)
diff --git a/nova/openstack/common/notifier/rpc_notifier.py b/nova/openstack/common/notifier/rpc_notifier.py
new file mode 100644
index 000000000..8316f7ab7
--- /dev/null
+++ b/nova/openstack/common/notifier/rpc_notifier.py
@@ -0,0 +1,46 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.openstack.common import cfg
+from nova.openstack.common import context as req_context
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+ 'notification_topics', default=['notifications', ],
+ help='AMQP topic used for openstack notifications')
+
+CONF = cfg.CONF
+CONF.register_opt(notification_topic_opt)
+
+
+def notify(context, message):
+ """Sends a notification via RPC"""
+ if not context:
+ context = req_context.get_admin_context()
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ for topic in CONF.notification_topics:
+ topic = '%s.%s' % (topic, priority)
+ try:
+ rpc.notify(context, topic, message)
+ except Exception:
+ LOG.exception(_("Could not send notification to %(topic)s. "
+ "Payload=%(message)s"), locals())
diff --git a/nova/openstack/common/notifier/rpc_notifier2.py b/nova/openstack/common/notifier/rpc_notifier2.py
new file mode 100644
index 000000000..beb2e9f71
--- /dev/null
+++ b/nova/openstack/common/notifier/rpc_notifier2.py
@@ -0,0 +1,51 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''messaging based notification driver, with message envelopes'''
+
+from nova.openstack.common import cfg
+from nova.openstack.common import context as req_context
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+ 'topics', default=['notifications', ],
+ help='AMQP topic(s) used for openstack notifications')
+
+opt_group = cfg.OptGroup(name='rpc_notifier2',
+ title='Options for rpc_notifier2')
+
+CONF = cfg.CONF
+CONF.register_group(opt_group)
+CONF.register_opt(notification_topic_opt, opt_group)
+
+
+def notify(context, message):
+ """Sends a notification via RPC"""
+ if not context:
+ context = req_context.get_admin_context()
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ for topic in CONF.rpc_notifier2.topics:
+ topic = '%s.%s' % (topic, priority)
+ try:
+ rpc.notify(context, topic, message, envelope=True)
+ except Exception:
+ LOG.exception(_("Could not send notification to %(topic)s. "
+ "Payload=%(message)s"), locals())
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index bf2b2e9e0..d43b48fa2 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -50,25 +50,26 @@ rpc_opts = [
default=['nova.openstack.common.exception',
'nova.exception',
'cinder.exception',
+ 'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
'upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
- #
- # The following options are not registered here, but are expected to be
- # present. The project using this library must register these options with
- # the configuration so that project-specific defaults may be defined.
- #
- #cfg.StrOpt('control_exchange',
- # default='nova',
- # help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+ cfg.StrOpt('control_exchange',
+ default='openstack',
+ help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
cfg.CONF.register_opts(rpc_opts)
+def set_defaults(control_exchange):
+ cfg.set_defaults(rpc_opts,
+ control_exchange=control_exchange)
+
+
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
@@ -177,17 +178,18 @@ def multicall(context, topic, msg, timeout=None):
return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
-def notify(context, topic, msg):
+def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
+ :param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
- return _get_impl().notify(cfg.CONF, context, topic, msg)
+ return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index a88408437..105e6fcbe 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -26,7 +26,6 @@ AMQP, but is deprecated and predates this code.
"""
import inspect
-import logging
import sys
import uuid
@@ -34,10 +33,10 @@ from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
+from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
@@ -55,7 +54,7 @@ class Pool(pools.Pool):
# TODO(comstud): Timeout connections not used in a while
def create(self):
- LOG.debug('Pool creating new connection')
+ LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
@@ -150,7 +149,7 @@ class ConnectionContext(rpc_common.Connection):
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
- ending=False):
+ ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
@@ -158,7 +157,8 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
- failure = rpc_common.serialize_remote_exception(failure)
+ failure = rpc_common.serialize_remote_exception(failure,
+ log_failure)
try:
msg = {'result': reply, 'failure': failure}
@@ -168,7 +168,7 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
'failure': failure}
if ending:
msg['ending'] = True
- conn.direct_send(msg_id, msg)
+ conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
@@ -185,10 +185,10 @@ class RpcContext(rpc_common.CommonRpcContext):
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
- connection_pool=None):
+ connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
- ending)
+ ending, log_failure)
if ending:
self.msg_id = None
@@ -282,11 +282,21 @@ class ProxyCallback(object):
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
- except Exception as e:
- LOG.exception('Exception during message handling')
+ except rpc_common.ClientException as e:
+ LOG.debug(_('Expected exception during message handling (%s)') %
+ e._exc_info[1])
+ ctxt.reply(None, e._exc_info,
+ connection_pool=self.connection_pool,
+ log_failure=False)
+ except Exception:
+ LOG.exception(_('Exception during message handling'))
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
+ def wait(self):
+ """Wait for all callback threads to exit."""
+ self.pool.waitall()
+
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
@@ -349,7 +359,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
- LOG.debug(_('Making asynchronous call on %s ...'), topic)
+ LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
@@ -358,7 +368,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
return wait_msg
@@ -377,7 +387,7 @@ def cast(conf, context, topic, msg, connection_pool):
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
@@ -385,7 +395,7 @@ def fanout_cast(conf, context, topic, msg, connection_pool):
LOG.debug(_('Making asynchronous fanout cast...'))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
- conn.fanout_send(topic, msg)
+ conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
@@ -393,7 +403,7 @@ def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
- conn.topic_send(topic, msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
@@ -402,15 +412,18 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
- conn.fanout_send(topic, msg)
+ conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-def notify(conf, context, topic, msg, connection_pool):
+def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
- event_type = msg.get('event_type')
- LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals())
+ LOG.debug(_('Sending %(event_type)s on %(topic)s'),
+ dict(event_type=msg.get('event_type'),
+ topic=topic))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
+ if envelope:
+ msg = rpc_common.serialize_msg(msg, force_envelope=True)
conn.notify_send(topic, msg)
@@ -420,7 +433,4 @@ def cleanup(connection_pool):
def get_control_exchange(conf):
- try:
- return conf.control_exchange
- except cfg.NoSuchOptError:
- return 'openstack'
+ return conf.control_exchange
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index eb3416804..bf4f5a3de 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -18,18 +18,61 @@
# under the License.
import copy
-import logging
+import sys
import traceback
+from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
+from nova.openstack.common import log as logging
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+'''RPC Envelope Version.
+
+This version number applies to the top level structure of messages sent out.
+It does *not* apply to the message payload, which must be versioned
+independently. For example, when using rpc APIs, a version number is applied
+for changes to the API being exposed over rpc. This version number is handled
+in the rpc proxy and dispatcher modules.
+
+This version number applies to the message envelope that is used in the
+serialization done inside the rpc layer. See serialize_msg() and
+deserialize_msg().
+
+The current message format (version 2.0) is very simple. It is:
+
+ {
+ 'nova.version': <RPC Envelope Version as a String>,
+ 'nova.message': <Application Message Payload, JSON encoded>
+ }
+
+Message format version '1.0' is just considered to be the messages we sent
+without a message envelope.
+
+So, the current message envelope just includes the envelope version. It may
+eventually contain additional information, such as a signature for the message
+payload.
+
+We will JSON encode the application message payload. The message envelope,
+which includes the JSON encoded application message body, will be passed down
+to the messaging libraries as a dict.
+'''
+_RPC_ENVELOPE_VERSION = '2.0'
+
+_VERSION_KEY = 'nova.version'
+_MESSAGE_KEY = 'nova.message'
+
+
+# TODO(russellb) Turn this on after Grizzly.
+_SEND_RPC_ENVELOPE = False
+
+
class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")
@@ -40,7 +83,7 @@ class RPCException(Exception):
try:
message = self.message % kwargs
- except Exception as e:
+ except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
@@ -90,6 +133,11 @@ class UnsupportedRpcVersion(RPCException):
"this endpoint.")
+class UnsupportedRpcEnvelopeVersion(RPCException):
+ message = _("Specified RPC envelope version, %(version)s, "
+ "not supported by this endpoint.")
+
+
class Connection(object):
"""A connection, returned by rpc.create_connection().
@@ -164,8 +212,12 @@ class Connection(object):
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
- SANITIZE = {'set_admin_password': ('new_pass',),
- 'run_instance': ('admin_password',), }
+ SANITIZE = {'set_admin_password': [('args', 'new_pass')],
+ 'run_instance': [('args', 'admin_password')],
+ 'route_message': [('args', 'message', 'args', 'method_info',
+ 'method_kwargs', 'password'),
+ ('args', 'message', 'args', 'method_info',
+ 'method_kwargs', 'admin_password')]}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
@@ -177,14 +229,16 @@ def _safe_log(log_func, msg, msg_data):
msg_data = copy.deepcopy(msg_data)
if has_method:
- method = msg_data['method']
- if method in SANITIZE:
- args_to_sanitize = SANITIZE[method]
- for arg in args_to_sanitize:
- try:
- msg_data['args'][arg] = "<SANITIZED>"
- except KeyError:
- pass
+ for arg in SANITIZE.get(msg_data['method'], []):
+ try:
+ d = msg_data
+ for elem in arg[:-1]:
+ d = d[elem]
+ d[arg[-1]] = '<SANITIZED>'
+ except KeyError, e:
+ LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
+ {'item': arg,
+ 'err': e})
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
@@ -195,7 +249,7 @@ def _safe_log(log_func, msg, msg_data):
return log_func(msg, msg_data)
-def serialize_remote_exception(failure_info):
+def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
@@ -203,8 +257,9 @@ def serialize_remote_exception(failure_info):
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
- LOG.error(_("Returning exception %s to caller"), unicode(failure))
- LOG.error(tb)
+ if log_failure:
+ LOG.error(_("Returning exception %s to caller"), unicode(failure))
+ LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
@@ -258,7 +313,7 @@ def deserialize_remote_exception(conf, data):
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
- except TypeError as e:
+ except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
@@ -309,3 +364,107 @@ class CommonRpcContext(object):
context.values['read_deleted'] = read_deleted
return context
+
+
+class ClientException(Exception):
+ """This encapsulates some actual exception that is expected to be
+ hit by an RPC proxy object. Merely instantiating it records the
+ current exception information, which will be passed back to the
+ RPC client without exceptional logging."""
+ def __init__(self):
+ self._exc_info = sys.exc_info()
+
+
+def catch_client_exception(exceptions, func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception, e:
+ if type(e) in exceptions:
+ raise ClientException()
+ else:
+ raise
+
+
+def client_exceptions(*exceptions):
+ """Decorator for manager methods that raise expected exceptions.
+ Marking a Manager method with this decorator allows the declaration
+ of expected exceptions that the RPC layer should not consider fatal,
+ and not log as if they were generated in a real error scenario. Note
+ that this will cause listed exceptions to be wrapped in a
+ ClientException, which is used internally by the RPC layer."""
+ def outer(func):
+ def inner(*args, **kwargs):
+ return catch_client_exception(exceptions, func, *args, **kwargs)
+ return inner
+ return outer
+
+
+def version_is_compatible(imp_version, version):
+ """Determine whether versions are compatible.
+
+ :param imp_version: The version implemented
+ :param version: The version requested by an incoming message.
+ """
+ version_parts = version.split('.')
+ imp_version_parts = imp_version.split('.')
+ if int(version_parts[0]) != int(imp_version_parts[0]): # Major
+ return False
+ if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
+ return False
+ return True
+
+
+def serialize_msg(raw_msg, force_envelope=False):
+ if not _SEND_RPC_ENVELOPE and not force_envelope:
+ return raw_msg
+
+ # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
+ # information about this format.
+ msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
+ _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
+
+ return msg
+
+
+def deserialize_msg(msg):
+ # NOTE(russellb): Hang on to your hats, this road is about to
+ # get a little bumpy.
+ #
+ # Robustness Principle:
+ # "Be strict in what you send, liberal in what you accept."
+ #
+ # At this point we have to do a bit of guessing about what it
+ # is we just received. Here is the set of possibilities:
+ #
+ # 1) We received a dict. This could be 2 things:
+ #
+ # a) Inspect it to see if it looks like a standard message envelope.
+ # If so, great!
+ #
+ # b) If it doesn't look like a standard message envelope, it could either
+ # be a notification, or a message from before we added a message
+ # envelope (referred to as version 1.0).
+ # Just return the message as-is.
+ #
+ # 2) It's any other non-dict type. Just return it and hope for the best.
+ # This case covers return values from rpc.call() from before message
+ # envelopes were used. (messages to call a method were always a dict)
+
+ if not isinstance(msg, dict):
+ # See #2 above.
+ return msg
+
+ base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
+ if not all(map(lambda key: key in msg, base_envelope_keys)):
+ # See #1.b above.
+ return msg
+
+ # At this point we think we have the message envelope
+ # format we were expecting. (#1.a above)
+
+ if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
+ raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
+
+ raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
+
+ return raw_msg
diff --git a/nova/openstack/common/rpc/dispatcher.py b/nova/openstack/common/rpc/dispatcher.py
index 34c2954db..5f4cc3a7c 100644
--- a/nova/openstack/common/rpc/dispatcher.py
+++ b/nova/openstack/common/rpc/dispatcher.py
@@ -41,8 +41,8 @@ server side of the API at the same time. However, as the code stands today,
there can be both versioned and unversioned APIs implemented in the same code
base.
-
-EXAMPLES:
+EXAMPLES
+========
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
API as an example. The client side is in nova/compute/rpcapi.py and the server
@@ -50,12 +50,13 @@ side is in nova/compute/manager.py.
Example 1) Adding a new method.
+-------------------------------
Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that must
-be implemented for the method to be supported. For example:
+be implemented for the method to be supported. For example::
def get_host_uptime(self, ctxt, host):
topic = _compute_topic(self.topic, ctxt, host, None)
@@ -67,10 +68,11 @@ get_host_uptime() method.
Example 2) Adding a new parameter.
+----------------------------------
Adding a new parameter to an rpc method can be made backwards compatible. The
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
-The implementation of the method must not expect the parameter to be present.
+The implementation of the method must not expect the parameter to be present.::
def some_remote_method(self, arg1, arg2, newarg=None):
# The code needs to deal with newarg=None for cases
@@ -101,21 +103,6 @@ class RpcDispatcher(object):
self.callbacks = callbacks
super(RpcDispatcher, self).__init__()
- @staticmethod
- def _is_compatible(mversion, version):
- """Determine whether versions are compatible.
-
- :param mversion: The API version implemented by a callback.
- :param version: The API version requested by an incoming message.
- """
- version_parts = version.split('.')
- mversion_parts = mversion.split('.')
- if int(version_parts[0]) != int(mversion_parts[0]): # Major
- return False
- if int(version_parts[1]) > int(mversion_parts[1]): # Minor
- return False
- return True
-
def dispatch(self, ctxt, version, method, **kwargs):
"""Dispatch a message based on a requested version.
@@ -137,7 +124,8 @@ class RpcDispatcher(object):
rpc_api_version = proxyobj.RPC_API_VERSION
else:
rpc_api_version = '1.0'
- is_compatible = self._is_compatible(rpc_api_version, version)
+ is_compatible = rpc_common.version_is_compatible(rpc_api_version,
+ version)
had_compatible = had_compatible or is_compatible
if not hasattr(proxyobj, method):
continue
diff --git a/nova/openstack/common/rpc/impl_fake.py b/nova/openstack/common/rpc/impl_fake.py
index 8db0da015..4d133a1af 100644
--- a/nova/openstack/common/rpc/impl_fake.py
+++ b/nova/openstack/common/rpc/impl_fake.py
@@ -18,11 +18,15 @@ queues. Casts will block, but this is very useful for tests.
"""
import inspect
+# NOTE(russellb): We specifically want to use json, not our own jsonutils.
+# jsonutils has some extra logic to automatically convert objects to primitive
+# types so that they can be serialized. We want to catch all cases where
+# non-primitive types make it into this code and treat it as an error.
+import json
import time
import eventlet
-from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
@@ -75,6 +79,8 @@ class Consumer(object):
else:
res.append(rval)
done.send(res)
+ except rpc_common.ClientException as e:
+ done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
@@ -121,7 +127,7 @@ def create_connection(conf, new=True):
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
- jsonutils.dumps(msg)
+ json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
@@ -154,6 +160,7 @@ def call(conf, context, topic, msg, timeout=None):
def cast(conf, context, topic, msg):
+ check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 46295d90f..bf38201f5 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -162,7 +162,8 @@ class ConsumerBase(object):
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
- callback(message.payload)
+ msg = rpc_common.deserialize_msg(message.payload)
+ callback(msg)
message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
@@ -196,7 +197,7 @@ class DirectConsumer(ConsumerBase):
# Default options
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
@@ -269,7 +270,7 @@ class FanoutConsumer(ConsumerBase):
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
@@ -316,7 +317,7 @@ class DirectPublisher(Publisher):
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
@@ -350,7 +351,7 @@ class FanoutPublisher(Publisher):
"""
options = {'durable': False,
'auto_delete': True,
- 'exclusive': True}
+ 'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
@@ -387,6 +388,7 @@ class Connection(object):
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
+ self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
@@ -409,18 +411,18 @@ class Connection(object):
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
- params = {}
+ params = {
+ 'hostname': hostname,
+ 'port': port,
+ 'userid': self.conf.rabbit_userid,
+ 'password': self.conf.rabbit_password,
+ 'virtual_host': self.conf.rabbit_virtual_host,
+ }
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
- params.setdefault('hostname', hostname)
- params.setdefault('port', port)
- params.setdefault('userid', self.conf.rabbit_userid)
- params.setdefault('password', self.conf.rabbit_password)
- params.setdefault('virtual_host', self.conf.rabbit_virtual_host)
-
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
@@ -469,7 +471,7 @@ class Connection(object):
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
- self.connection.close()
+ self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
@@ -573,12 +575,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
@@ -644,6 +648,11 @@ class Connection(object):
pass
self.consumer_thread = None
+ def wait_on_proxy_callbacks(self):
+ """Wait for all proxy callback threads to exit."""
+ for proxy_cb in self.proxy_callbacks:
+ proxy_cb.wait()
+
def publisher_send(self, cls, topic, msg, **kwargs):
"""Send to a publisher based on the publisher class"""
@@ -719,6 +728,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
@@ -730,6 +740,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
@@ -782,11 +793,12 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))
-def notify(conf, context, topic, msg):
+def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
+ rpc_amqp.get_connection_pool(conf, Connection),
+ envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 70a03c5bf..2e05f02f1 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -17,7 +17,6 @@
import functools
import itertools
-import logging
import time
import uuid
@@ -29,6 +28,7 @@ import qpid.messaging.exceptions
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
@@ -41,6 +41,9 @@ qpid_opts = [
cfg.StrOpt('qpid_port',
default='5672',
help='Qpid broker port'),
+ cfg.ListOpt('qpid_hosts',
+ default=['$qpid_hostname:$qpid_port'],
+ help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
@@ -50,24 +53,6 @@ qpid_opts = [
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
- cfg.BoolOpt('qpid_reconnect',
- default=True,
- help='Automatically reconnect'),
- cfg.IntOpt('qpid_reconnect_timeout',
- default=0,
- help='Reconnection timeout in seconds'),
- cfg.IntOpt('qpid_reconnect_limit',
- default=0,
- help='Max reconnections before giving up'),
- cfg.IntOpt('qpid_reconnect_interval_min',
- default=0,
- help='Minimum seconds between reconnection attempts'),
- cfg.IntOpt('qpid_reconnect_interval_max',
- default=0,
- help='Maximum seconds between reconnection attempts'),
- cfg.IntOpt('qpid_reconnect_interval',
- default=0,
- help='Equivalent to setting max and min to the same value'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
@@ -139,7 +124,8 @@ class ConsumerBase(object):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
try:
- self.callback(message.content)
+ msg = rpc_common.deserialize_msg(message.content)
+ self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
@@ -292,52 +278,45 @@ class Connection(object):
self.session = None
self.consumers = {}
self.consumer_thread = None
+ self.proxy_callbacks = []
self.conf = conf
- if server_params is None:
- server_params = {}
-
- default_params = dict(hostname=self.conf.qpid_hostname,
- port=self.conf.qpid_port,
- username=self.conf.qpid_username,
- password=self.conf.qpid_password)
+ if server_params and 'hostname' in server_params:
+ # NOTE(russellb) This enables support for cast_to_server.
+ server_params['qpid_hosts'] = [
+ '%s:%d' % (server_params['hostname'],
+ server_params.get('port', 5672))
+ ]
+
+ params = {
+ 'qpid_hosts': self.conf.qpid_hosts,
+ 'username': self.conf.qpid_username,
+ 'password': self.conf.qpid_password,
+ }
+ params.update(server_params or {})
- params = server_params
- for key in default_params.keys():
- params.setdefault(key, default_params[key])
+ self.brokers = params['qpid_hosts']
+ self.username = params['username']
+ self.password = params['password']
+ self.connection_create(self.brokers[0])
+ self.reconnect()
- self.broker = params['hostname'] + ":" + str(params['port'])
+ def connection_create(self, broker):
# Create the connection - this does not open the connection
- self.connection = qpid.messaging.Connection(self.broker)
+ self.connection = qpid.messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
- self.connection.username = params['username']
- self.connection.password = params['password']
+ self.connection.username = self.username
+ self.connection.password = self.password
+
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
- self.connection.reconnect = self.conf.qpid_reconnect
- if self.conf.qpid_reconnect_timeout:
- self.connection.reconnect_timeout = (
- self.conf.qpid_reconnect_timeout)
- if self.conf.qpid_reconnect_limit:
- self.connection.reconnect_limit = self.conf.qpid_reconnect_limit
- if self.conf.qpid_reconnect_interval_max:
- self.connection.reconnect_interval_max = (
- self.conf.qpid_reconnect_interval_max)
- if self.conf.qpid_reconnect_interval_min:
- self.connection.reconnect_interval_min = (
- self.conf.qpid_reconnect_interval_min)
- if self.conf.qpid_reconnect_interval:
- self.connection.reconnect_interval = (
- self.conf.qpid_reconnect_interval)
+ # Reconnection is done by self.reconnect()
+ self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
- # Open is part of reconnect -
- # NOTE(WGH) not sure we need this with the reconnect flags
- self.reconnect()
-
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
@@ -352,23 +331,36 @@ class Connection(object):
except qpid.messaging.exceptions.ConnectionError:
pass
+ attempt = 0
+ delay = 1
while True:
+ broker = self.brokers[attempt % len(self.brokers)]
+ attempt += 1
+
try:
+ self.connection_create(broker)
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
- LOG.error(_('Unable to connect to AMQP server: %s'), e)
- time.sleep(self.conf.qpid_reconnect_interval or 1)
+ msg_dict = dict(e=e, delay=delay)
+ msg = _("Unable to connect to AMQP server: %(e)s. "
+ "Sleeping %(delay)s seconds") % msg_dict
+ LOG.error(msg)
+ time.sleep(delay)
+ delay = min(2 * delay, 60)
else:
+ LOG.info(_('Connected to AMQP server on %s'), broker)
break
- LOG.info(_('Connected to AMQP server on %s'), self.broker)
-
self.session = self.connection.session()
- for consumer in self.consumers.itervalues():
- consumer.reconnect(self.session)
-
if self.consumers:
+ consumers = self.consumers
+ self.consumers = {}
+
+ for consumer in consumers.itervalues():
+ consumer.reconnect(self.session)
+ self._register_consumer(consumer)
+
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
@@ -384,12 +376,14 @@ class Connection(object):
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.connection.close()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
@@ -444,6 +438,11 @@ class Connection(object):
pass
self.consumer_thread = None
+ def wait_on_proxy_callbacks(self):
+ """Wait for all proxy callback threads to exit."""
+ for proxy_cb in self.proxy_callbacks:
+ proxy_cb.wait()
+
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""
@@ -519,6 +518,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
@@ -534,6 +534,7 @@ class Connection(object):
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
@@ -592,10 +593,11 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg):
rpc_amqp.get_connection_pool(conf, Connection))
-def notify(conf, context, topic, msg):
+def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
+ rpc_amqp.get_connection_pool(conf, Connection),
+ envelope)
def cleanup():
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 0daf07cf4..d99d390f2 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -205,7 +205,9 @@ class ZmqClient(object):
def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
self.outq = ZmqSocket(addr, socket_type, bind=bind)
- def cast(self, msg_id, topic, data):
+ def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
+ if serialize:
+ data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
@@ -250,7 +252,7 @@ class InternalContext(object):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
- data.setdefault('args', [])
+ data.setdefault('args', {})
try:
result = proxy.dispatch(
@@ -259,7 +261,14 @@ class InternalContext(object):
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
+ except rpc_common.ClientException, e:
+ LOG.debug(_("Expected exception during message handling (%s)") %
+ e._exc_info[1])
+ return {'exc':
+ rpc_common.serialize_remote_exception(e._exc_info,
+ log_failure=False)}
except Exception:
+ LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
@@ -314,7 +323,7 @@ class ConsumerBase(object):
return
data.setdefault('version', None)
- data.setdefault('args', [])
+ data.setdefault('args', {})
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
@@ -426,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
sock_type = zmq.PUB
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
- inside = _deserialize(in_msg)
+ inside = rpc_common.deserialize_msg(_deserialize(in_msg))
msg_id = inside[-1]['args']['msg_id']
response = inside[-1]['args']['response']
LOG.debug(_("->response->%s"), response)
@@ -473,7 +482,7 @@ class ZmqReactor(ZmqBaseReactor):
msg_id, topic, style, in_msg = data
- ctx, request = _deserialize(in_msg)
+ ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
ctx = RpcContext.unmarshal(ctx)
proxy = self.proxies[sock]
@@ -524,7 +533,8 @@ class Connection(rpc_common.Connection):
self.reactor.consume_in_thread()
-def _cast(addr, context, msg_id, topic, msg, timeout=None):
+def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
+ force_envelope=False):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
@@ -533,7 +543,7 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None):
conn = ZmqClient(addr)
# assumes cast can't return an exception
- conn.cast(msg_id, topic, payload)
+ conn.cast(msg_id, topic, payload, serialize, force_envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
@@ -602,7 +612,8 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
return responses[-1]
-def _multi_send(method, context, topic, msg, timeout=None):
+def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
+ force_envelope=False):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
@@ -628,7 +639,8 @@ def _multi_send(method, context, topic, msg, timeout=None):
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
- _topic, _topic, msg, timeout)
+ _topic, _topic, msg, timeout, serialize,
+ force_envelope)
return
return method(_addr, context, _topic, _topic, msg, timeout)
@@ -669,6 +681,8 @@ def notify(conf, context, topic, msg, **kwargs):
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic.replace('.', '-')
+ kwargs['serialize'] = kwargs.pop('envelope')
+ kwargs['force_envelope'] = True
cast(conf, context, topic, msg, **kwargs)
diff --git a/nova/openstack/common/rpc/matchmaker.py b/nova/openstack/common/rpc/matchmaker.py
index 783e3713c..8b2c67a44 100644
--- a/nova/openstack/common/rpc/matchmaker.py
+++ b/nova/openstack/common/rpc/matchmaker.py
@@ -21,10 +21,10 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
import contextlib
import itertools
import json
-import logging
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
matchmaker_opts = [
diff --git a/nova/openstack/common/rpc/service.py b/nova/openstack/common/rpc/service.py
new file mode 100644
index 000000000..94dc7960e
--- /dev/null
+++ b/nova/openstack/common/rpc/service.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
+from nova.openstack.common import service
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Service(service.Service):
+ """Service object for binaries running on hosts.
+
+ A service enables rpc by listening to queues based on topic and host."""
+ def __init__(self, host, topic, manager=None):
+ super(Service, self).__init__()
+ self.host = host
+ self.topic = topic
+ if manager is None:
+ self.manager = self
+ else:
+ self.manager = manager
+
+ def start(self):
+ super(Service, self).start()
+
+ self.conn = rpc.create_connection(new=True)
+ LOG.debug(_("Creating Consumer connection for Service %s") %
+ self.topic)
+
+ dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])
+
+ # Share this same connection for these Consumers
+ self.conn.create_consumer(self.topic, dispatcher, fanout=False)
+
+ node_topic = '%s.%s' % (self.topic, self.host)
+ self.conn.create_consumer(node_topic, dispatcher, fanout=False)
+
+ self.conn.create_consumer(self.topic, dispatcher, fanout=True)
+
+ # Hook to allow the manager to do other initializations after
+ # the rpc connection is created.
+ if callable(getattr(self.manager, 'initialize_service_hook', None)):
+ self.manager.initialize_service_hook(self)
+
+ # Consume from all consumers in a thread
+ self.conn.consume_in_thread()
+
+ def stop(self):
+ # Try to shut the connection down, but if we get any sort of
+ # errors, go ahead and ignore them.. as we're shutting down anyway
+ try:
+ self.conn.close()
+ except Exception:
+ pass
+ super(Service, self).stop()
diff --git a/nova/openstack/common/setup.py b/nova/openstack/common/setup.py
index 4e2a57717..e6f72f034 100644
--- a/nova/openstack/common/setup.py
+++ b/nova/openstack/common/setup.py
@@ -117,8 +117,12 @@ def write_requirements():
def _run_shell_command(cmd):
- output = subprocess.Popen(["/bin/sh", "-c", cmd],
- stdout=subprocess.PIPE)
+ if os.name == 'nt':
+ output = subprocess.Popen(["cmd.exe", "/C", cmd],
+ stdout=subprocess.PIPE)
+ else:
+ output = subprocess.Popen(["/bin/sh", "-c", cmd],
+ stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
@@ -136,15 +140,17 @@ def _get_git_next_version_suffix(branch_name):
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
- if not milestonever:
- milestonever = ""
+ if milestonever:
+ first_half = "%s~%s" % (milestonever, datestamp)
+ else:
+ first_half = datestamp
+
post_version = _get_git_post_version()
# post version should look like:
# 0.1.1.4.gcc9e28a
# where the bit after the last . is the short sha, and the bit between
# the last and second to last is the revno count
(revno, sha) = post_version.split(".")[-2:]
- first_half = "%s~%s" % (milestonever, datestamp)
second_half = "%s%s.%s" % (revno_prefix, revno, sha)
return ".".join((first_half, second_half))
diff --git a/nova/openstack/common/timeutils.py b/nova/openstack/common/timeutils.py
index 86004391d..0f346087f 100644
--- a/nova/openstack/common/timeutils.py
+++ b/nova/openstack/common/timeutils.py
@@ -71,11 +71,15 @@ def normalize_time(timestamp):
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
+ if isinstance(before, basestring):
+ before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
+ if isinstance(after, basestring):
+ after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
@@ -87,7 +91,10 @@ def utcnow_ts():
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
- return utcnow.override_time
+ try:
+ return utcnow.override_time.pop(0)
+ except AttributeError:
+ return utcnow.override_time
return datetime.datetime.utcnow()
@@ -95,14 +102,21 @@ utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()):
- """Override utils.utcnow to return a constant time."""
+ """
+ Override utils.utcnow to return a constant time or a list thereof,
+ one at a time.
+ """
utcnow.override_time = override_time
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
- utcnow.override_time += timedelta
+ try:
+ for dt in utcnow.override_time:
+ dt += timedelta
+ except TypeError:
+ utcnow.override_time += timedelta
def advance_time_seconds(seconds):
@@ -135,3 +149,16 @@ def unmarshall_time(tyme):
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
+
+
+def delta_seconds(before, after):
+ """
+ Compute the difference in seconds between two date, time, or
+ datetime objects (as a float, to microsecond resolution).
+ """
+ delta = after - before
+ try:
+ return delta.total_seconds()
+ except AttributeError:
+ return ((delta.days * 24 * 3600) + delta.seconds +
+ float(delta.microseconds) / (10 ** 6))
diff --git a/nova/openstack/common/uuidutils.py b/nova/openstack/common/uuidutils.py
index 51042a798..7608acb94 100644
--- a/nova/openstack/common/uuidutils.py
+++ b/nova/openstack/common/uuidutils.py
@@ -22,6 +22,10 @@ UUID related utilities and helper functions.
import uuid
+def generate_uuid():
+ return str(uuid.uuid4())
+
+
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
diff --git a/nova/paths.py b/nova/paths.py
new file mode 100644
index 000000000..7405a7409
--- /dev/null
+++ b/nova/paths.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova.openstack.common import cfg
+
+path_opts = [
+ cfg.StrOpt('pybasedir',
+ default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '../')),
+ help='Directory where the nova python module is installed'),
+ cfg.StrOpt('bindir',
+ default='$pybasedir/bin',
+ help='Directory where nova binaries are installed'),
+ cfg.StrOpt('state_path',
+ default='$pybasedir',
+ help="Top-level directory for maintaining nova's state"),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(path_opts)
+
+
+def basedir_def(*args):
+ """Return an uninterpolated path relative to $pybasedir."""
+ return os.path.join('$pybasedir', *args)
+
+
+def bindir_def(*args):
+ """Return an uninterpolated path relative to $bindir."""
+ return os.path.join('$bindir', *args)
+
+
+def state_path_def(*args):
+ """Return an uninterpolated path relative to $state_path."""
+ return os.path.join('$state_path', *args)
+
+
+def basedir_rel(*args):
+ """Return a path relative to $pybasedir."""
+ return os.path.join(CONF.pybasedir, *args)
+
+
+def bindir_rel(*args):
+ """Return a path relative to $bindir."""
+ return os.path.join(CONF.bindir, *args)
+
+
+def state_path_rel(*args):
+ """Return a path relative to $state_path."""
+ return os.path.join(CONF.state_path, *args)
diff --git a/nova/policy.py b/nova/policy.py
index 9506635e9..27e261eac 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -15,13 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Policy Engine For Nova"""
+"""Policy Engine For Nova."""
import os.path
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import policy
from nova import utils
@@ -36,7 +34,7 @@ policy_opts = [
help=_('Rule checked when requested rule is not found')),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(policy_opts)
_POLICY_PATH = None
diff --git a/nova/quota.py b/nova/quota.py
index e4c6f1d49..1856c97c1 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -16,14 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Quotas for instances, volumes, and floating ips."""
+"""Quotas for instances, and floating ips."""
import datetime
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -42,12 +40,6 @@ quota_opts = [
cfg.IntOpt('quota_ram',
default=50 * 1024,
help='megabytes of instance ram allowed per project'),
- cfg.IntOpt('quota_volumes',
- default=10,
- help='number of volumes allowed per project'),
- cfg.IntOpt('quota_gigabytes',
- default=1000,
- help='number of volume gigabytes allowed per project'),
cfg.IntOpt('quota_floating_ips',
default=10,
help='number of floating ips allowed per project'),
@@ -86,7 +78,7 @@ quota_opts = [
help='default driver to use for quota checks'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(quota_opts)
@@ -206,7 +198,7 @@ class DbQuotaDriver(object):
return quotas
- def _get_quotas(self, context, resources, keys, has_sync):
+ def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
"""
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
@@ -219,6 +211,9 @@ class DbQuotaDriver(object):
have a sync attribute; if False, indicates
that the resource must NOT have a sync
attribute.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
# Filter resources
@@ -237,12 +232,12 @@ class DbQuotaDriver(object):
# Grab and return the quotas (without usages)
quotas = self.get_project_quotas(context, sub_resources,
- context.project_id,
+ project_id,
context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
- def limit_check(self, context, resources, values):
+ def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -262,6 +257,9 @@ class DbQuotaDriver(object):
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
# Ensure no value is less than zero
@@ -269,9 +267,13 @@ class DbQuotaDriver(object):
if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys(),
- has_sync=False)
+ has_sync=False, project_id=project_id)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
@@ -281,7 +283,8 @@ class DbQuotaDriver(object):
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
- def reserve(self, context, resources, deltas, expire=None):
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -311,6 +314,9 @@ class DbQuotaDriver(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
# Set up the reservation expiration
@@ -323,12 +329,16 @@ class DbQuotaDriver(object):
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
+
# Get the applicable quotas.
# NOTE(Vek): We're not worried about races at this point.
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
- has_sync=True)
+ has_sync=True, project_id=project_id)
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
@@ -336,27 +346,40 @@ class DbQuotaDriver(object):
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
- CONF.until_refresh, CONF.max_age)
+ CONF.until_refresh, CONF.max_age,
+ project_id=project_id)
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_commit(context, reservations)
+ db.reservation_commit(context, reservations, project_id=project_id)
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
+ # If project_id is None, then we use the project_id in context
+ if project_id is None:
+ project_id = context.project_id
- db.reservation_rollback(context, reservations)
+ db.reservation_rollback(context, reservations, project_id=project_id)
def usage_reset(self, context, resources):
"""
@@ -410,6 +433,192 @@ class DbQuotaDriver(object):
db.reservation_expire(context)
+class NoopQuotaDriver(object):
+ """Driver that turns quotas calls into no-ops and pretends that quotas
+ for all resources are unlimited. This can be used if you do not
+ wish to have any quota checking. For instance, with nova compute
+ cells, the parent cell should do quota checking, but the child cell
+ should not.
+ """
+
+ def get_by_project(self, context, project_id, resource):
+ """Get a specific quota by project."""
+ # Unlimited
+ return -1
+
+ def get_by_class(self, context, quota_class, resource):
+ """Get a specific quota by quota class."""
+ # Unlimited
+ return -1
+
+ def get_defaults(self, context, resources):
+ """Given a list of resources, retrieve the default quotas.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ """
+ quotas = {}
+ for resource in resources.values():
+ quotas[resource.name] = -1
+ return quotas
+
+ def get_class_quotas(self, context, resources, quota_class,
+ defaults=True):
+ """
+ Given a list of resources, retrieve the quotas for the given
+ quota class.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ :param quota_class: The name of the quota class to return
+ quotas for.
+ :param defaults: If True, the default value will be reported
+ if there is no specific value for the
+ resource.
+ """
+ quotas = {}
+ for resource in resources.values():
+ quotas[resource.name] = -1
+ return quotas
+
+ def get_project_quotas(self, context, resources, project_id,
+ quota_class=None, defaults=True,
+ usages=True):
+ """
+ Given a list of resources, retrieve the quotas for the given
+ project.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ :param project_id: The ID of the project to return quotas for.
+ :param quota_class: If project_id != context.project_id, the
+ quota class cannot be determined. This
+ parameter allows it to be specified. It
+ will be ignored if project_id ==
+ context.project_id.
+ :param defaults: If True, the quota class value (or the
+ default value, if there is no value from the
+ quota class) will be reported if there is no
+ specific value for the resource.
+ :param usages: If True, the current in_use and reserved counts
+ will also be returned.
+ """
+ quotas = {}
+ for resource in resources.values():
+ quotas[resource.name] = -1
+ return quotas
+
+ def limit_check(self, context, resources, values):
+ """Check simple quota limits.
+
+ For limits--those quotas for which there is no usage
+ synchronization function--this method checks that a set of
+ proposed values are permitted by the limit restriction.
+
+ This method will raise a QuotaResourceUnknown exception if a
+ given resource is unknown or if it is not a simple limit
+ resource.
+
+ If any of the proposed values is over the defined quota, an
+ OverQuota exception will be raised with the sorted list of the
+ resources which are too high. Otherwise, the method returns
+ nothing.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ :param values: A dictionary of the values to check against the
+ quota.
+ """
+ pass
+
+ def reserve(self, context, resources, deltas, expire=None):
+ """Check quotas and reserve resources.
+
+ For counting quotas--those quotas for which there is a usage
+ synchronization function--this method checks quotas against
+ current usage and the desired deltas.
+
+ This method will raise a QuotaResourceUnknown exception if a
+ given resource is unknown or if it does not have a usage
+ synchronization function.
+
+ If any of the proposed values is over the defined quota, an
+ OverQuota exception will be raised with the sorted list of the
+ resources which are too high. Otherwise, the method returns a
+ list of reservation UUIDs which were created.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ :param deltas: A dictionary of the proposed delta changes.
+ :param expire: An optional parameter specifying an expiration
+ time for the reservations. If it is a simple
+ number, it is interpreted as a number of
+ seconds and added to the current time; if it is
+ a datetime.timedelta object, it will also be
+ added to the current time. A datetime.datetime
+ object will be interpreted as the absolute
+ expiration time. If None is specified, the
+ default expiration time set by
+ --default-reservation-expire will be used (this
+ value will be treated as a number of seconds).
+ """
+ return []
+
+ def commit(self, context, reservations):
+ """Commit reservations.
+
+ :param context: The request context, for access checks.
+ :param reservations: A list of the reservation UUIDs, as
+ returned by the reserve() method.
+ """
+ pass
+
+ def rollback(self, context, reservations):
+ """Roll back reservations.
+
+ :param context: The request context, for access checks.
+ :param reservations: A list of the reservation UUIDs, as
+ returned by the reserve() method.
+ """
+ pass
+
+ def usage_reset(self, context, resources):
+ """
+ Reset the usage records for a particular user on a list of
+ resources. This will force that user's usage records to be
+ refreshed the next time a reservation is made.
+
+ Note: this does not affect the currently outstanding
+ reservations the user has; those reservations must be
+ committed or rolled back (or expired).
+
+ :param context: The request context, for access checks.
+ :param resources: A list of the resource names for which the
+ usage must be reset.
+ """
+ pass
+
+ def destroy_all_by_project(self, context, project_id):
+ """
+ Destroy all quotas, usages, and reservations associated with a
+ project.
+
+ :param context: The request context, for access checks.
+ :param project_id: The ID of the project being deleted.
+ """
+ pass
+
+ def expire(self, context):
+ """Expire reservations.
+
+ Explores all currently existing reservations and rolls back
+ any that have expired.
+
+ :param context: The request context, for access checks.
+ """
+ pass
+
+
class BaseResource(object):
"""Describe a single resource for quota checking."""
@@ -665,7 +874,7 @@ class QuotaEngine(object):
return res.count(context, *args, **kwargs)
- def limit_check(self, context, **values):
+ def limit_check(self, context, project_id=None, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -685,11 +894,15 @@ class QuotaEngine(object):
nothing.
:param context: The request context, for access checks.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
- return self._driver.limit_check(context, self._resources, values)
+ return self._driver.limit_check(context, self._resources, values,
+ project_id=project_id)
- def reserve(self, context, expire=None, **deltas):
+ def reserve(self, context, expire=None, project_id=None, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -719,25 +932,32 @@ class QuotaEngine(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
reservations = self._driver.reserve(context, self._resources, deltas,
- expire=expire)
+ expire=expire,
+ project_id=project_id)
LOG.debug(_("Created reservations %(reservations)s") % locals())
return reservations
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
try:
- self._driver.commit(context, reservations)
+ self._driver.commit(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
@@ -745,17 +965,21 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Committed reservations %(reservations)s") % locals())
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
try:
- self._driver.rollback(context, reservations)
+ self._driver.rollback(context, reservations, project_id=project_id)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
@@ -763,6 +987,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Rolled back reservations %(reservations)s") % locals())
def usage_reset(self, context, resources):
"""
@@ -814,12 +1039,6 @@ def _sync_instances(context, project_id, session):
context, project_id, session=session)))
-def _sync_volumes(context, project_id, session):
- return dict(zip(('volumes', 'gigabytes'),
- db.volume_data_get_for_project(
- context, project_id, session=session)))
-
-
def _sync_floating_ips(context, project_id, session):
return dict(floating_ips=db.floating_ip_count_by_project(
context, project_id, session=session))
@@ -837,8 +1056,6 @@ resources = [
ReservableResource('instances', _sync_instances, 'quota_instances'),
ReservableResource('cores', _sync_instances, 'quota_cores'),
ReservableResource('ram', _sync_instances, 'quota_ram'),
- ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
- ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
ReservableResource('floating_ips', _sync_floating_ips,
'quota_floating_ips'),
AbsoluteResource('metadata_items', 'quota_metadata_items'),
diff --git a/nova/rootwrap/filters.py b/nova/rootwrap/filters.py
index 52808d9ec..8958f1ba1 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/rootwrap/filters.py
@@ -20,33 +20,52 @@ import re
class CommandFilter(object):
- """Command filter only checking that the 1st argument matches exec_path"""
+ """Command filter only checking that the 1st argument matches exec_path."""
def __init__(self, exec_path, run_as, *args):
+ self.name = ''
self.exec_path = exec_path
self.run_as = run_as
self.args = args
+ self.real_exec = None
+
+ def get_exec(self, exec_dirs=[]):
+ """Returns existing executable, or empty string if none found."""
+ if self.real_exec is not None:
+ return self.real_exec
+ self.real_exec = ""
+ if self.exec_path.startswith('/'):
+ if os.access(self.exec_path, os.X_OK):
+ self.real_exec = self.exec_path
+ else:
+ for binary_path in exec_dirs:
+ expanded_path = os.path.join(binary_path, self.exec_path)
+ if os.access(expanded_path, os.X_OK):
+ self.real_exec = expanded_path
+ break
+ return self.real_exec
def match(self, userargs):
- """Only check that the first argument (command) matches exec_path"""
+ """Only check that the first argument (command) matches exec_path."""
if (os.path.basename(self.exec_path) == userargs[0]):
return True
return False
- def get_command(self, userargs):
+ def get_command(self, userargs, exec_dirs=[]):
"""Returns command to execute (with sudo -u if run_as != root)."""
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
if (self.run_as != 'root'):
# Used to run commands at lesser privileges
- return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:]
- return [self.exec_path] + userargs[1:]
+ return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
+ return [to_exec] + userargs[1:]
def get_environment(self, userargs):
- """Returns specific environment to set, None if none"""
+ """Returns specific environment to set, None if none."""
return None
class RegExpFilter(CommandFilter):
- """Command filter doing regexp matching for every argument"""
+ """Command filter doing regexp matching for every argument."""
def match(self, userargs):
# Early skip if command or number of args don't match
@@ -70,27 +89,35 @@ class RegExpFilter(CommandFilter):
class DnsmasqFilter(CommandFilter):
- """Specific filter for the dnsmasq call (which includes env)"""
+ """Specific filter for the dnsmasq call (which includes env)."""
+
+ CONFIG_FILE_ARG = 'CONFIG_FILE'
def match(self, userargs):
if (userargs[0] == 'env' and
- userargs[1].startswith('FLAGFILE=') and
+ userargs[1].startswith(self.CONFIG_FILE_ARG) and
userargs[2].startswith('NETWORK_ID=') and
userargs[3] == 'dnsmasq'):
return True
return False
- def get_command(self, userargs):
+ def get_command(self, userargs, exec_dirs=[]):
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
dnsmasq_pos = userargs.index('dnsmasq')
- return [self.exec_path] + userargs[dnsmasq_pos + 1:]
+ return [to_exec] + userargs[dnsmasq_pos + 1:]
def get_environment(self, userargs):
env = os.environ.copy()
- env['FLAGFILE'] = userargs[1].split('=')[-1]
+ env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
env['NETWORK_ID'] = userargs[2].split('=')[-1]
return env
+class DeprecatedDnsmasqFilter(DnsmasqFilter):
+ """Variant of dnsmasq filter to support old-style FLAGFILE."""
+ CONFIG_FILE_ARG = 'FLAGFILE'
+
+
class KillFilter(CommandFilter):
"""Specific filter for the kill calls.
1st argument is the user to run /bin/kill under
@@ -137,7 +164,7 @@ class KillFilter(CommandFilter):
class ReadFileFilter(CommandFilter):
- """Specific filter for the utils.read_file_as_root call"""
+ """Specific filter for the utils.read_file_as_root call."""
def __init__(self, file_path, *args):
self.file_path = file_path
diff --git a/nova/rootwrap/wrapper.py b/nova/rootwrap/wrapper.py
index 3dd7ee7e3..70bd63c47 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/rootwrap/wrapper.py
@@ -17,23 +17,93 @@
import ConfigParser
+import logging
+import logging.handlers
import os
import string
from nova.rootwrap import filters
+class NoFilterMatched(Exception):
+ """This exception is raised when no filter matched."""
+ pass
+
+
+class FilterMatchNotExecutable(Exception):
+ """
+ This exception is raised when a filter matched but no executable was
+ found.
+ """
+ def __init__(self, match=None, **kwargs):
+ self.match = match
+
+
+class RootwrapConfig(object):
+
+ def __init__(self, config):
+ # filters_path
+ self.filters_path = config.get("DEFAULT", "filters_path").split(",")
+
+ # exec_dirs
+ if config.has_option("DEFAULT", "exec_dirs"):
+ self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
+ else:
+ # Use system PATH if exec_dirs is not specified
+ self.exec_dirs = os.environ["PATH"].split(':')
+
+ # syslog_log_facility
+ if config.has_option("DEFAULT", "syslog_log_facility"):
+ v = config.get("DEFAULT", "syslog_log_facility")
+ facility_names = logging.handlers.SysLogHandler.facility_names
+ self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
+ v, None)
+ if self.syslog_log_facility is None and v in facility_names:
+ self.syslog_log_facility = facility_names.get(v)
+ if self.syslog_log_facility is None:
+ raise ValueError('Unexpected syslog_log_facility: %s' % v)
+ else:
+ default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
+ self.syslog_log_facility = default_facility
+
+ # syslog_log_level
+ if config.has_option("DEFAULT", "syslog_log_level"):
+ v = config.get("DEFAULT", "syslog_log_level")
+ self.syslog_log_level = logging.getLevelName(v.upper())
+ if (self.syslog_log_level == "Level %s" % v.upper()):
+ raise ValueError('Unexepected syslog_log_level: %s' % v)
+ else:
+ self.syslog_log_level = logging.ERROR
+
+ # use_syslog
+ if config.has_option("DEFAULT", "use_syslog"):
+ self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
+ else:
+ self.use_syslog = False
+
+
+def setup_syslog(execname, facility, level):
+ rootwrap_logger = logging.getLogger()
+ rootwrap_logger.setLevel(level)
+ handler = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
+ handler.setFormatter(logging.Formatter(
+ os.path.basename(execname) + ': %(message)s'))
+ rootwrap_logger.addHandler(handler)
+
+
def build_filter(class_name, *args):
- """Returns a filter object of class class_name"""
+ """Returns a filter object of class class_name."""
if not hasattr(filters, class_name):
- # TODO(ttx): Log the error (whenever nova-rootwrap has a log file)
+ logging.warning("Skipping unknown filter class (%s) specified "
+ "in filter definitions" % class_name)
return None
filterclass = getattr(filters, class_name)
return filterclass(*args)
def load_filters(filters_path):
- """Load filters from a list of directories"""
+ """Load filters from a list of directories."""
filterlist = []
for filterdir in filters_path:
if not os.path.isdir(filterdir):
@@ -46,27 +116,34 @@ def load_filters(filters_path):
newfilter = build_filter(*filterdefinition)
if newfilter is None:
continue
+ newfilter.name = name
filterlist.append(newfilter)
return filterlist
-def match_filter(filters, userargs):
+def match_filter(filters, userargs, exec_dirs=[]):
"""
Checks user command and arguments through command filters and
- returns the first matching filter, or None is none matched.
+ returns the first matching filter.
+ Raises NoFilterMatched if no filter matched.
+ Raises FilterMatchNotExecutable if no executable was found for the
+ best filter match.
"""
-
- found_filter = None
+ first_not_executable_filter = None
for f in filters:
if f.match(userargs):
# Try other filters if executable is absent
- if not os.access(f.exec_path, os.X_OK):
- if not found_filter:
- found_filter = f
+ if not f.get_exec(exec_dirs=exec_dirs):
+ if not first_not_executable_filter:
+ first_not_executable_filter = f
continue
# Otherwise return matching filter for execution
return f
- # No filter matched or first missing executable
- return found_filter
+ if first_not_executable_filter:
+ # A filter matched, but no executable was found for it
+ raise FilterMatchNotExecutable(match=first_not_executable_filter)
+
+ # No filter matched
+ raise NoFilterMatched()
diff --git a/nova/scheduler/baremetal_host_manager.py b/nova/scheduler/baremetal_host_manager.py
new file mode 100644
index 000000000..fdf482de7
--- /dev/null
+++ b/nova/scheduler/baremetal_host_manager.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Manage hosts in the current zone.
+"""
+
+from nova.scheduler import host_manager
+
+
+class BaremetalNodeState(host_manager.HostState):
+ """Mutable and immutable information tracked for a host.
+ This is an attempt to remove the ad-hoc data structures
+ previously used and lock down access.
+ """
+
+ def update_from_compute_node(self, compute):
+ """Update information about a host from its compute_node info."""
+ all_ram_mb = compute['memory_mb']
+
+ free_disk_mb = compute['free_disk_gb'] * 1024
+ free_ram_mb = compute['free_ram_mb']
+
+ self.free_ram_mb = free_ram_mb
+ self.total_usable_ram_mb = all_ram_mb
+ self.free_disk_mb = free_disk_mb
+ self.vcpus_total = compute['vcpus']
+ self.vcpus_used = compute['vcpus_used']
+
+ def consume_from_instance(self, instance):
+ self.free_ram_mb = 0
+ self.free_disk_mb = 0
+ self.vcpus_used = self.vcpus_total
+
+
+def new_host_state(self, host, node, capabilities=None, service=None):
+ """Returns an instance of BaremetalHostState or HostState according to
+ capabilities. If 'baremetal_driver' is in capabilities, it returns an
+ instance of BaremetalHostState. If not, returns an instance of HostState.
+ """
+ if capabilities is None:
+ capabilities = {}
+ cap = capabilities.get('compute', {})
+ if bool(cap.get('baremetal_driver')):
+ return BaremetalNodeState(host, node, capabilities, service)
+ else:
+ return host_manager.HostState(host, node, capabilities, service)
+
+
+class BaremetalHostManager(host_manager.HostManager):
+ """Bare-Metal HostManager class."""
+
+ # Override.
+ # Yes, this is not a class, and it is OK
+ host_state_cls = new_host_state
+
+ def __init__(self):
+ super(BaremetalHostManager, self).__init__()
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 1a608da29..e161166fd 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -23,12 +23,12 @@ Chance (Random) Scheduler implementation
import random
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.scheduler import driver
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
class ChanceScheduler(driver.Scheduler):
@@ -61,7 +61,7 @@ class ChanceScheduler(driver.Scheduler):
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
- """Create and run an instance or instances"""
+ """Create and run an instance or instances."""
instance_uuids = request_spec.get('instance_uuids')
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f93268906..d1ae1cd6e 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -28,19 +28,15 @@ from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
-from nova.openstack.common import rpc
from nova.openstack.common import timeutils
-from nova import utils
-
+from nova import servicegroup
LOG = logging.getLogger(__name__)
@@ -53,13 +49,9 @@ scheduler_driver_opts = [
help='Maximum number of attempts to schedule an instance'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
-CONF = config.CONF
-CONF.import_opt('instances_path', 'nova.compute.manager')
-CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
-
def handle_schedule_error(context, ex, instance_uuid, request_spec):
if not isinstance(ex, exception.NoValidHost):
@@ -90,46 +82,17 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
def instance_update_db(context, instance_uuid):
- '''Clear the host and set the scheduled_at field of an Instance.
+ '''Clear the host and node - set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
- values = {'host': None, 'scheduled_at': now}
+ values = {'host': None, 'node': None, 'scheduled_at': now}
return db.instance_update(context, instance_uuid, values)
-def cast_to_compute_host(context, host, method, **kwargs):
- """Cast request to a compute host queue"""
-
- instance_uuid = kwargs.get('instance_uuid', None)
- if instance_uuid:
- instance_update_db(context, instance_uuid)
-
- rpc.cast(context,
- rpc.queue_get_for(context, CONF.compute_topic, host),
- {"method": method, "args": kwargs})
- LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
-
-
-def cast_to_host(context, topic, host, method, **kwargs):
- """Generic cast to host"""
-
- topic_mapping = {CONF.compute_topic: cast_to_compute_host}
-
- func = topic_mapping.get(topic)
- if func:
- cast_to_compute_host(context, host, method, **kwargs)
- else:
- rpc.cast(context,
- rpc.queue_get_for(context, topic, host),
- {"method": method, "args": kwargs})
- LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
- % locals())
-
-
def encode_instance(instance, local=True):
- """Encode locally created instance for return via RPC"""
+ """Encode locally created instance for return via RPC."""
# TODO(comstud): I would love to be able to return the full
# instance information here, but we'll need some modifications
# to the RPC code to handle datetime conversions with the
@@ -154,6 +117,7 @@ class Scheduler(object):
CONF.scheduler_host_manager)
self.compute_api = compute_api.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
+ self.servicegroup_api = servicegroup.API()
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
@@ -166,7 +130,7 @@ class Scheduler(object):
services = db.service_get_all_by_topic(context, topic)
return [service['host']
for service in services
- if utils.service_is_up(service)]
+ if self.servicegroup_api.service_is_up(service)]
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
@@ -228,12 +192,12 @@ class Scheduler(object):
# Checking src host exists and compute node
src = instance_ref['host']
try:
- services = db.service_get_all_compute_by_host(context, src)
+ service = db.service_get_by_compute_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
- if not utils.service_is_up(services[0]):
+ if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
@@ -245,11 +209,10 @@ class Scheduler(object):
"""
# Checking dest exists and compute node.
- dservice_refs = db.service_get_all_compute_by_host(context, dest)
- dservice_ref = dservice_refs[0]
+ dservice_ref = db.service_get_by_compute_host(context, dest)
# Checking dest host is alive.
- if not utils.service_is_up(dservice_ref):
+ if not self.servicegroup_api.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
# Checking whether The host where instance is running
@@ -326,5 +289,5 @@ class Scheduler(object):
:return: value specified by key
"""
- compute_node_ref = db.service_get_all_compute_by_host(context, host)
- return compute_node_ref[0]['compute_node'][0]
+ service_ref = db.service_get_by_compute_host(context, host)
+ return service_ref['compute_node'][0]
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index b2802931c..07a3f578a 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -19,19 +19,14 @@ You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
-import operator
-
-from nova import config
from nova import exception
-from nova import flags
-from nova.openstack.common import importutils
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.scheduler import driver
-from nova.scheduler import least_cost
from nova.scheduler import scheduler_options
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -39,7 +34,7 @@ class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
- self.cost_function_cache = {}
+ self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
def schedule_run_instance(self, context, request_spec,
@@ -61,9 +56,8 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
- weighted_hosts = self._schedule(context, CONF.compute_topic,
- request_spec, filter_properties,
- instance_uuids)
+ weighed_hosts = self._schedule(context, request_spec,
+ filter_properties, instance_uuids)
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@@ -74,11 +68,11 @@ class FilterScheduler(driver.Scheduler):
try:
try:
- weighted_host = weighted_hosts.pop(0)
+ weighed_host = weighed_hosts.pop(0)
except IndexError:
raise exception.NoValidHost(reason="")
- self._provision_resource(context, weighted_host,
+ self._provision_resource(context, weighed_host,
request_spec,
filter_properties,
requested_networks,
@@ -108,66 +102,70 @@ class FilterScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- hosts = self._schedule(context, CONF.compute_topic, request_spec,
- filter_properties, [instance['uuid']])
- if not hosts:
+ weighed_hosts = self._schedule(context, request_spec,
+ filter_properties, [instance['uuid']])
+ if not weighed_hosts:
raise exception.NoValidHost(reason="")
- host = hosts.pop(0)
+ weighed_host = weighed_hosts.pop(0)
self._post_select_populate_filter_properties(filter_properties,
- host.host_state)
+ weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
# Forward off to the host
self.compute_rpcapi.prep_resize(context, image, instance,
- instance_type, host.host_state.host, reservations,
- request_spec=request_spec, filter_properties=filter_properties)
+ instance_type, weighed_host.obj.host, reservations,
+ request_spec=request_spec, filter_properties=filter_properties,
+ node=weighed_host.obj.nodename)
- def _provision_resource(self, context, weighted_host, request_spec,
+ def _provision_resource(self, context, weighed_host, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance_uuid=None):
"""Create the requested resource in this Zone."""
payload = dict(request_spec=request_spec,
- weighted_host=weighted_host.to_dict(),
+ weighted_host=weighed_host.to_dict(),
instance_id=instance_uuid)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
- updated_instance = driver.instance_update_db(context, instance_uuid)
+ updated_instance = driver.instance_update_db(context,
+ instance_uuid)
self._post_select_populate_filter_properties(filter_properties,
- weighted_host.host_state)
+ weighed_host.obj)
self.compute_rpcapi.run_instance(context, instance=updated_instance,
- host=weighted_host.host_state.host,
+ host=weighed_host.obj.host,
request_spec=request_spec, filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files,
- admin_password=admin_password, is_first_time=is_first_time)
+ admin_password=admin_password, is_first_time=is_first_time,
+ node=weighed_host.obj.nodename)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
- """Add additional information to the filter properties after a host has
+ """Add additional information to the filter properties after a node has
been selected by the scheduling process.
"""
- # Add a retry entry for the selected compute host:
- self._add_retry_host(filter_properties, host_state.host)
+ # Add a retry entry for the selected compute host and node:
+ self._add_retry_host(filter_properties, host_state.host,
+ host_state.nodename)
self._add_oversubscription_policy(filter_properties, host_state)
- def _add_retry_host(self, filter_properties, host):
- """Add a retry entry for the selected compute host. In the event that
+ def _add_retry_host(self, filter_properties, host, node):
+ """Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
- host has already been tried.
+ node has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
- hosts.append(host)
+ hosts.append([host, node])
def _add_oversubscription_policy(self, filter_properties, host_state):
filter_properties['limits'] = host_state.limits
@@ -193,6 +191,23 @@ class FilterScheduler(driver.Scheduler):
"'scheduler_max_attempts', must be >= 1"))
return max_attempts
+ def _log_compute_error(self, instance_uuid, retry):
+ """If the request contained an exception from a previous compute
+ build/resize operation, log it to aid debugging
+ """
+ exc = retry.pop('exc', None) # string-ified exception from compute
+ if not exc:
+ return # no exception info from a prevous attempt, skip
+
+ hosts = retry.get('hosts', None)
+ if not hosts:
+ return # no previously attempted hosts, skip
+
+ last_host, last_node = hosts[-1]
+ msg = _("Error from last host: %(last_host)s (node %(last_node)s): "
+ "%(exc)s") % locals()
+ LOG.error(msg, instance_uuid=instance_uuid)
+
def _populate_retry(self, filter_properties, instance_properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
@@ -214,26 +229,23 @@ class FilterScheduler(driver.Scheduler):
}
filter_properties['retry'] = retry
+ instance_uuid = instance_properties.get('uuid')
+ self._log_compute_error(instance_uuid, retry)
+
if retry['num_attempts'] > max_attempts:
- instance_uuid = instance_properties.get('uuid')
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"instance %(instance_uuid)s") % locals()
raise exception.NoValidHost(reason=msg)
- def _schedule(self, context, topic, request_spec, filter_properties,
+ def _schedule(self, context, request_spec, filter_properties,
instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
- if topic != CONF.compute_topic:
- msg = _("Scheduler only understands Compute nodes (for now)")
- raise NotImplementedError(msg)
-
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
- cost_functions = self.get_cost_functions()
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
@@ -260,8 +272,7 @@ class FilterScheduler(driver.Scheduler):
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
- hosts = self.host_manager.get_all_host_states(
- elevated, topic)
+ hosts = self.host_manager.get_all_host_states(elevated)
selected_hosts = []
if instance_uuids:
@@ -270,7 +281,7 @@ class FilterScheduler(driver.Scheduler):
num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
- hosts = self.host_manager.filter_hosts(hosts,
+ hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
# Can't get any more locally.
@@ -278,63 +289,12 @@ class FilterScheduler(driver.Scheduler):
LOG.debug(_("Filtered %(hosts)s") % locals())
- # weighted_host = WeightedHost() ... the best
- # host for the job.
- # TODO(comstud): filter_properties will also be used for
- # weighing and I plan fold weighing into the host manager
- # in a future patch. I'll address the naming of this
- # variable at that time.
- weighted_host = least_cost.weighted_sum(cost_functions,
- hosts, filter_properties)
- LOG.debug(_("Weighted %(weighted_host)s") % locals())
- selected_hosts.append(weighted_host)
-
+ weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
+ filter_properties)
+ best_host = weighed_hosts[0]
+ LOG.debug(_("Choosing host %(best_host)s") % locals())
+ selected_hosts.append(best_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
- weighted_host.host_state.consume_from_instance(
- instance_properties)
-
- selected_hosts.sort(key=operator.attrgetter('weight'))
+ best_host.obj.consume_from_instance(instance_properties)
return selected_hosts
-
- def get_cost_functions(self, topic=None):
- """Returns a list of tuples containing weights and cost functions to
- use for weighing hosts
- """
- if topic is None:
- # Schedulers only support compute right now.
- topic = CONF.compute_topic
- if topic in self.cost_function_cache:
- return self.cost_function_cache[topic]
-
- cost_fns = []
- for cost_fn_str in CONF.least_cost_functions:
- if '.' in cost_fn_str:
- short_name = cost_fn_str.split('.')[-1]
- else:
- short_name = cost_fn_str
- cost_fn_str = "%s.%s.%s" % (
- __name__, self.__class__.__name__, short_name)
- if not (short_name.startswith('%s_' % topic) or
- short_name.startswith('noop')):
- continue
-
- try:
- # NOTE: import_class is somewhat misnamed since
- # the weighing function can be any non-class callable
- # (i.e., no 'self')
- cost_fn = importutils.import_class(cost_fn_str)
- except ImportError:
- raise exception.SchedulerCostFunctionNotFound(
- cost_fn_str=cost_fn_str)
-
- try:
- flag_name = "%s_weight" % cost_fn.__name__
- weight = getattr(CONF, flag_name)
- except AttributeError:
- raise exception.SchedulerWeightFlagNotFound(
- flag_name=flag_name)
- cost_fns.append((weight, cost_fn))
-
- self.cost_function_cache[topic] = cost_fns
- return cost_fns
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 2056f968e..6e8e7ea7b 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -17,71 +17,41 @@
Scheduler host filters
"""
-import os
-import types
+from nova import filters
+from nova.openstack.common import log as logging
-from nova import exception
-from nova.openstack.common import importutils
+LOG = logging.getLogger(__name__)
-class BaseHostFilter(object):
+class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
+ def _filter_one(self, obj, filter_properties):
+ """Return True if the object passes the filter, otherwise False."""
+ return self.host_passes(obj, filter_properties)
def host_passes(self, host_state, filter_properties):
+ """Return True if the HostState passes the filter, otherwise False.
+ Override this in a subclass.
+ """
raise NotImplementedError()
- def _full_name(self):
- """module.classname of the filter."""
- return "%s.%s" % (self.__module__, self.__class__.__name__)
+class HostFilterHandler(filters.BaseFilterHandler):
+ def __init__(self):
+ super(HostFilterHandler, self).__init__(BaseHostFilter)
-def _is_filter_class(cls):
- """Return whether a class is a valid Host Filter class."""
- return type(cls) is types.TypeType and issubclass(cls, BaseHostFilter)
+def all_filters():
+ """Return a list of filter classes found in this directory.
-def _get_filter_classes_from_module(module_name):
- """Get all filter classes from a module."""
- classes = []
- module = importutils.import_module(module_name)
- for obj_name in dir(module):
- itm = getattr(module, obj_name)
- if _is_filter_class(itm):
- classes.append(itm)
- return classes
+ This method is used as the default for available scheduler filters
+ and should return a list of all filter classes available.
+ """
+ return HostFilterHandler().get_all_classes()
def standard_filters():
- """Return a list of filter classes found in this directory."""
- classes = []
- filters_dir = __path__[0]
- for dirpath, dirnames, filenames in os.walk(filters_dir):
- relpath = os.path.relpath(dirpath, filters_dir)
- if relpath == '.':
- relpkg = ''
- else:
- relpkg = '.%s' % '.'.join(relpath.split(os.sep))
- for fname in filenames:
- root, ext = os.path.splitext(fname)
- if ext != '.py' or root == '__init__':
- continue
- module_name = "%s%s.%s" % (__package__, relpkg, root)
- mod_classes = _get_filter_classes_from_module(module_name)
- classes.extend(mod_classes)
- return classes
-
-
-def get_filter_classes(filter_class_names):
- """Get filter classes from class names."""
- classes = []
- for cls_name in filter_class_names:
- obj = importutils.import_class(cls_name)
- if _is_filter_class(obj):
- classes.append(obj)
- elif type(obj) is types.FunctionType:
- # Get list of classes from a function
- classes.extend(obj())
- else:
- raise exception.ClassNotFound(class_name=cls_name,
- exception='Not a valid scheduler filter')
- return classes
+ """Deprecated. Configs should change to use all_filters()."""
+ LOG.deprecated(_("Use 'nova.scheduler.filters.all_filters' instead "
+ "of 'nova.scheduler.filters.standard_filters'"))
+ return all_filters()
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 4e55d0b0c..585acbaf8 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -14,11 +14,23 @@
# under the License.
+from nova import availability_zones
+from nova import db
+from nova.openstack.common import cfg
from nova.scheduler import filters
+CONF = cfg.CONF
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+
+
class AvailabilityZoneFilter(filters.BaseHostFilter):
- """Filters Hosts by availability zone."""
+ """Filters Hosts by availability zone.
+
+ Works with aggregate metadata availability zones, using the key
+ 'availability_zone'
+ Note: in theory a compute node can be part of multiple availability_zones
+ """
def host_passes(self, host_state, filter_properties):
spec = filter_properties.get('request_spec', {})
@@ -26,5 +38,12 @@ class AvailabilityZoneFilter(filters.BaseHostFilter):
availability_zone = props.get('availability_zone')
if availability_zone:
- return availability_zone == host_state.service['availability_zone']
+ context = filter_properties['context'].elevated()
+ metadata = db.aggregate_metadata_get_by_host(
+ context, host_state.host, key='availability_zone')
+ if 'availability_zone' in metadata:
+ return availability_zone in metadata['availability_zone']
+ else:
+ return availability_zone == CONF.default_availability_zone
+ return False
return True
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index 73e33178f..acbfa4702 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -31,11 +31,20 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return True
for key, req in instance_type['extra_specs'].iteritems():
- # NOTE(jogo) any key containing a scope (scope is terminated
- # by a `:') will be ignored by this filter. (bug 1039386)
- if key.count(':'):
+ # Either not scope format, or in capabilities scope
+ scope = key.split(':')
+ if len(scope) > 1 and scope[0] != "capabilities":
continue
- cap = capabilities.get(key, None)
+ elif scope[0] == "capabilities":
+ del scope[0]
+ cap = capabilities
+ for index in range(0, len(scope)):
+ try:
+ cap = cap.get(scope[index], None)
+ except AttributeError:
+ return False
+ if cap is None:
+ return False
if not extra_specs_ops.match(cap, req):
return False
return True
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 202f8232a..2cdfb91f4 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -13,29 +13,29 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
-from nova import utils
+from nova import servicegroup
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ComputeFilter(filters.BaseHostFilter):
- """Filter on active Compute nodes"""
+ """Filter on active Compute nodes."""
+
+ def __init__(self):
+ self.servicegroup_api = servicegroup.API()
def host_passes(self, host_state, filter_properties):
- """Returns True for only active compute nodes"""
- instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
- return True
+ """Returns True for only active compute nodes."""
capabilities = host_state.capabilities
service = host_state.service
- if not utils.service_is_up(service) or service['disabled']:
+ alive = self.servicegroup_api.service_is_up(service)
+ if not alive or service['disabled']:
LOG.debug(_("%(host_state)s is disabled or has not been "
"heard from in a while"), locals())
return False
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 9c93df930..54561b811 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -28,7 +26,7 @@ cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
default=16.0,
help='Virtual CPU to Physical CPU allocation ratio')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(cpu_allocation_ratio_opt)
@@ -38,7 +36,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
if not host_state.vcpus_total:
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index 358583b8a..e7a292c45 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -24,15 +22,15 @@ LOG = logging.getLogger(__name__)
disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0,
help="virtual disk to physical disk allocation ratio")
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(disk_allocation_ratio_opt)
class DiskFilter(filters.BaseHostFilter):
- """Disk Filter with over subscription flag"""
+ """Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
- """Filter based on disk usage"""
+ """Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')
requested_disk = 1024 * (instance_type['root_gb'] +
instance_type['ephemeral_gb'])
diff --git a/nova/scheduler/filters/image_props_filter.py b/nova/scheduler/filters/image_props_filter.py
index f9ef693b3..a545ce9ff 100644
--- a/nova/scheduler/filters/image_props_filter.py
+++ b/nova/scheduler/filters/image_props_filter.py
@@ -26,7 +26,7 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
"""Filter compute nodes that satisfy instance image properties.
The ImagePropertiesFilter filters compute nodes that satisfy
- any architecture, hpervisor type, or virtual machine mode properties
+ any architecture, hypervisor type, or virtual machine mode properties
specified on the instance's image properties. Image properties are
contained in the image dictionary in the request_spec.
"""
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index 1b40bae62..2780ff252 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -25,12 +23,12 @@ max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
help="Ignore hosts that have too many builds/resizes/snaps/migrations")
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
- """Filter out hosts with too many concurrent I/O operations"""
+ """Filter out hosts with too many concurrent I/O operations."""
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index d1d16b80b..37a8f440d 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -13,11 +13,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.scheduler import filters
-CONF = config.CONF
+isolated_opts = [
+ cfg.ListOpt('isolated_images',
+ default=[],
+ help='Images to run on isolated host'),
+ cfg.ListOpt('isolated_hosts',
+ default=[],
+ help='Host reserved for specific images'),
+]
+CONF = cfg.CONF
+CONF.register_opts(isolated_opts)
class IsolatedHostsFilter(filters.BaseHostFilter):
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
index cfb2698db..2d070ea8e 100644
--- a/nova/scheduler/filters/json_filter.py
+++ b/nova/scheduler/filters/json_filter.py
@@ -51,7 +51,7 @@ class JsonFilter(filters.BaseHostFilter):
return self._op_compare(args, operator.gt)
def _in(self, args):
- """First term is in set of remaining terms"""
+ """First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index 17c7ebc22..bdc350f95 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -25,12 +23,12 @@ max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
default=50,
help="Ignore hosts that have too many instances")
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(max_instances_per_host_opt)
class NumInstancesFilter(filters.BaseHostFilter):
- """Filter out hosts with too many instances"""
+ """Filter out hosts with too many instances."""
def host_passes(self, host_state, filter_properties):
num_instances = host_state.num_instances
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index 85370dc2c..f9d6bb750 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -26,12 +24,12 @@ ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio",
default=1.5,
help="virtual ram to physical ram allocation ratio")
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(ram_allocation_ratio_opt)
class RamFilter(filters.BaseHostFilter):
- """Ram Filter with over subscription flag"""
+ """Ram Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Only return hosts with sufficient available RAM."""
diff --git a/nova/scheduler/filters/retry_filter.py b/nova/scheduler/filters/retry_filter.py
index 6740ec099..4d6ed50ee 100644
--- a/nova/scheduler/filters/retry_filter.py
+++ b/nova/scheduler/filters/retry_filter.py
@@ -20,12 +20,12 @@ LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
- """Filter out hosts that have already been attempted for scheduling
+ """Filter out nodes that have already been attempted for scheduling
purposes
"""
def host_passes(self, host_state, filter_properties):
- """Skip hosts that have already been attempted"""
+ """Skip nodes that have already been attempted."""
retry = filter_properties.get('retry', None)
if not retry:
# Re-scheduling is disabled
@@ -33,10 +33,13 @@ class RetryFilter(filters.BaseHostFilter):
return True
hosts = retry.get('hosts', [])
- host = host_state.host
+ host = [host_state.host, host_state.nodename]
- LOG.debug(_("Previously tried hosts: %(hosts)s. (host=%(host)s)") %
- locals())
+ passes = host not in hosts
+ pass_msg = "passes" if passes else "fails"
+
+ LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: "
+ "%(hosts)s") % locals())
# Host passes if it's not in the list of previously attempted hosts:
- return host not in hosts
+ return passes
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 4fd0488d9..4d0f2305f 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -48,8 +48,6 @@ import httplib
import socket
import ssl
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -82,7 +80,7 @@ trusted_opts = [
help='attestation authorization blob - must change'),
]
-CONF = config.CONF
+CONF = cfg.CONF
trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
CONF.register_group(trust_group)
CONF.register_opts(trusted_opts, group=trust_group)
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 1f4b6d956..b472220bd 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -21,18 +21,17 @@ import UserDict
from nova.compute import task_states
from nova.compute import vm_states
-from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
+from nova.scheduler import weights
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
- default=['nova.scheduler.filters.standard_filters'],
+ default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.standard_filters" '
@@ -48,9 +47,12 @@ host_manager_opts = [
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
+ cfg.ListOpt('scheduler_weight_classes',
+ default=['nova.scheduler.weights.all_weighers'],
+ help='Which weight class names to use for weighing hosts'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
@@ -94,10 +96,10 @@ class HostState(object):
previously used and lock down access.
"""
- def __init__(self, host, topic, capabilities=None, service=None):
+ def __init__(self, host, node, capabilities=None, service=None):
self.host = host
- self.topic = topic
- self.update_capabilities(topic, capabilities, service)
+ self.nodename = node
+ self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
@@ -126,19 +128,20 @@ class HostState(object):
self.updated = None
- def update_capabilities(self, topic, capabilities=None, service=None):
+ def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
- self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
+ self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
- if self.updated and self.updated > compute['updated_at']:
+ if (self.updated and compute['updated_at']
+ and self.updated > compute['updated_at']):
return
all_ram_mb = compute['memory_mb']
@@ -193,7 +196,7 @@ class HostState(object):
self.num_io_ops = int(statmap.get('io_workload', 0))
def consume_from_instance(self, instance):
- """Incrementally update host state from an instance"""
+ """Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
@@ -240,35 +243,9 @@ class HostState(object):
def _statmap(self, stats):
return dict((st['key'], st['value']) for st in stats)
- def passes_filters(self, filter_fns, filter_properties):
- """Return whether or not this host passes filters."""
-
- if self.host in filter_properties.get('ignore_hosts', []):
- LOG.debug(_('Host filter fails for ignored host %(host)s'),
- {'host': self.host})
- return False
-
- force_hosts = filter_properties.get('force_hosts', [])
- if force_hosts:
- if not self.host in force_hosts:
- LOG.debug(_('Host filter fails for non-forced host %(host)s'),
- {'host': self.host})
- return self.host in force_hosts
-
- for filter_fn in filter_fns:
- if not filter_fn(self, filter_properties):
- LOG.debug(_('Host filter function %(func)s failed for '
- '%(host)s'),
- {'func': repr(filter_fn),
- 'host': self.host})
- return False
-
- LOG.debug(_('Host filter passes for %(host)s'), {'host': self.host})
- return True
-
def __repr__(self):
- return ("%s ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
- (self.host, self.free_ram_mb, self.free_disk_mb,
+ return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
+ (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances, self.allowed_vm_type))
@@ -279,34 +256,34 @@ class HostManager(object):
host_state_cls = HostState
def __init__(self):
- self.service_states = {} # { <host> : { <service> : { cap k : v }}}
+ # { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
+ self.service_states = {}
self.host_state_map = {}
- self.filter_classes = filters.get_filter_classes(
+ self.filter_handler = filters.HostFilterHandler()
+ self.filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ CONF.scheduler_weight_classes)
- def _choose_host_filters(self, filters):
+ def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
- if filters is None:
- filters = CONF.scheduler_default_filters
- if not isinstance(filters, (list, tuple)):
- filters = [filters]
+ if filter_cls_names is None:
+ filter_cls_names = CONF.scheduler_default_filters
+ if not isinstance(filter_cls_names, (list, tuple)):
+ filter_cls_names = [filter_cls_names]
good_filters = []
bad_filters = []
- for filter_name in filters:
+ for filter_name in filter_cls_names:
found_class = False
for cls in self.filter_classes:
if cls.__name__ == filter_name:
+ good_filters.append(cls)
found_class = True
- filter_instance = cls()
- # Get the filter function
- filter_func = getattr(filter_instance,
- 'host_passes', None)
- if filter_func:
- good_filters.append(filter_func)
break
if not found_class:
bad_filters.append(filter_name)
@@ -315,39 +292,77 @@ class HostManager(object):
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
- def filter_hosts(self, hosts, filter_properties, filters=None):
- """Filter hosts and return only ones passing all filters"""
- filtered_hosts = []
- filter_fns = self._choose_host_filters(filters)
- for host in hosts:
- if host.passes_filters(filter_fns, filter_properties):
- filtered_hosts.append(host)
- return filtered_hosts
+ def get_filtered_hosts(self, hosts, filter_properties,
+ filter_class_names=None):
+ """Filter hosts and return only ones passing all filters."""
+
+ def _strip_ignore_hosts(host_map, hosts_to_ignore):
+ ignored_hosts = []
+ for host in hosts_to_ignore:
+ if host in host_map:
+ del host_map[host]
+ ignored_hosts.append(host)
+ ignored_hosts_str = ', '.join(ignored_hosts)
+ msg = _('Host filter ignoring hosts: %(ignored_hosts_str)s')
+ LOG.debug(msg, locals())
+
+ def _match_forced_hosts(host_map, hosts_to_force):
+ for host in host_map.keys():
+ if host not in hosts_to_force:
+ del host_map[host]
+ if not host_map:
+ forced_hosts_str = ', '.join(hosts_to_force)
+ msg = _("No hosts matched due to not matching 'force_hosts'"
+ "value of '%(forced_hosts_str)s'")
+ LOG.debug(msg, locals())
+ return
+ forced_hosts_str = ', '.join(host_map.iterkeys())
+ msg = _('Host filter forcing available hosts to '
+ '%(forced_hosts_str)s')
+ LOG.debug(msg, locals())
+
+ filter_classes = self._choose_host_filters(filter_class_names)
+ ignore_hosts = filter_properties.get('ignore_hosts', [])
+ force_hosts = filter_properties.get('force_hosts', [])
+ if ignore_hosts or force_hosts:
+ name_to_cls_map = dict([(x.host, x) for x in hosts])
+ if ignore_hosts:
+ _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
+ if force_hosts:
+ _match_forced_hosts(name_to_cls_map, force_hosts)
+ if not name_to_cls_map:
+ return []
+ hosts = name_to_cls_map.itervalues()
+
+ return self.filter_handler.get_filtered_objects(filter_classes,
+ hosts, filter_properties)
+
+ def get_weighed_hosts(self, hosts, weight_properties):
+ """Weigh the hosts."""
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
+
+ if service_name != 'compute':
+ LOG.debug(_('Ignoring %(service_name)s service update '
+ 'from %(host)s'), locals())
+ return
+
+ state_key = (host, capabilities.get('hypervisor_hostname'))
LOG.debug(_("Received %(service_name)s service update from "
- "%(host)s.") % locals())
- service_caps = self.service_states.get(host, {})
+ "%(state_key)s.") % locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
- service_caps[service_name] = capab_copy
- self.service_states[host] = service_caps
+ self.service_states[state_key] = capab_copy
- def get_all_host_states(self, context, topic):
+ def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
-
- Note: this can be very slow with a lot of instances.
- InstanceType table isn't required since a copy is stored
- with the instance (in case the InstanceType changed since the
- instance was created)."""
-
- if topic != CONF.compute_topic:
- raise NotImplementedError(_(
- "host_manager only implemented for 'compute'"))
+ """
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
@@ -357,16 +372,18 @@ class HostManager(object):
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
- capabilities = self.service_states.get(host, None)
- host_state = self.host_state_map.get(host)
+ node = compute.get('hypervisor_hostname')
+ state_key = (host, node)
+ capabilities = self.service_states.get(state_key, None)
+ host_state = self.host_state_map.get(state_key)
if host_state:
- host_state.update_capabilities(topic, capabilities,
+ host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
- host_state = self.host_state_cls(host, topic,
+ host_state = self.host_state_cls(host, node,
capabilities=capabilities,
service=dict(service.iteritems()))
- self.host_state_map[host] = host_state
+ self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
return self.host_state_map.itervalues()
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
deleted file mode 100644
index d3eaee735..000000000
--- a/nova/scheduler/least_cost.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Least Cost is an algorithm for choosing which host machines to
-provision a set of resources to. The input is a WeightedHost object which
-is decided upon by a set of objective-functions, called the 'cost-functions'.
-The WeightedHost contains a combined weight for each cost-function.
-
-The cost-function and weights are tabulated, and the host with the least cost
-is then selected for provisioning.
-"""
-
-from nova import config
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-least_cost_opts = [
- cfg.ListOpt('least_cost_functions',
- default=[
- 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
- ],
- help='Which cost functions the LeastCostScheduler should use'),
- cfg.FloatOpt('noop_cost_fn_weight',
- default=1.0,
- help='How much weight to give the noop cost function'),
- cfg.FloatOpt('compute_fill_first_cost_fn_weight',
- default=-1.0,
- help='How much weight to give the fill-first cost function. '
- 'A negative value will reverse behavior: '
- 'e.g. spread-first'),
- ]
-
-CONF = config.CONF
-CONF.register_opts(least_cost_opts)
-
-# TODO(sirp): Once we have enough of these rules, we can break them out into a
-# cost_functions.py file (perhaps in a least_cost_scheduler directory)
-
-
-class WeightedHost(object):
- """Reduced set of information about a host that has been weighed.
- This is an attempt to remove some of the ad-hoc dict structures
- previously used."""
-
- def __init__(self, weight, host_state=None):
- self.weight = weight
- self.host_state = host_state
-
- def to_dict(self):
- x = dict(weight=self.weight)
- if self.host_state:
- x['host'] = self.host_state.host
- return x
-
- def __repr__(self):
- if self.host_state:
- return "WeightedHost host: %s" % self.host_state.host
- return "WeightedHost with no host_state"
-
-
-def noop_cost_fn(host_state, weighing_properties):
- """Return a pre-weight cost of 1 for each host"""
- return 1
-
-
-def compute_fill_first_cost_fn(host_state, weighing_properties):
- """More free ram = higher weight. So servers with less free
- ram will be preferred.
-
- Note: the weight for this function in default configuration
- is -1.0. With a -1.0 this function runs in reverse, so systems
- with the most free memory will be preferred.
- """
- return host_state.free_ram_mb
-
-
-def weighted_sum(weighted_fns, host_states, weighing_properties):
- """Use the weighted-sum method to compute a score for an array of objects.
-
- Normalize the results of the objective-functions so that the weights are
- meaningful regardless of objective-function's range.
-
- :param host_list: ``[(host, HostInfo()), ...]``
- :param weighted_fns: list of weights and functions like::
-
- [(weight, objective-functions), ...]
-
- :param weighing_properties: an arbitrary dict of values that can
- influence weights.
-
- :returns: a single WeightedHost object which represents the best
- candidate.
- """
-
- min_score, best_host = None, None
- for host_state in host_states:
- score = sum(weight * fn(host_state, weighing_properties)
- for weight, fn in weighted_fns)
- if min_score is None or score < min_score:
- min_score, best_host = score, host_state
-
- return WeightedHost(min_score, host_state=best_host)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 77f5a0259..84bdcddb5 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -26,11 +26,9 @@ import sys
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
import nova.context
from nova import db
from nova import exception
-from nova import flags
from nova import manager
from nova import notifications
from nova.openstack.common import cfg
@@ -47,7 +45,7 @@ scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='nova.scheduler.filter_scheduler.FilterScheduler',
help='Default driver to use for the scheduler')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
QUOTAS = quota.QUOTAS
@@ -56,7 +54,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '2.3'
+ RPC_API_VERSION = '2.5'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -74,10 +72,13 @@ class SchedulerManager(manager.Manager):
def update_service_capabilities(self, context, service_name,
host, capabilities):
"""Process a capability update from a service node."""
- if capabilities is None:
- capabilities = {}
- self.driver.update_service_capabilities(service_name, host,
- capabilities)
+ if not isinstance(capabilities, list):
+ capabilities = [capabilities]
+ for capability in capabilities:
+ if capability is None:
+ capability = {}
+ self.driver.update_service_capabilities(service_name, host,
+ capability)
def create_volume(self, context, volume_id, snapshot_id,
reservations=None, image_id=None):
@@ -154,7 +155,7 @@ class SchedulerManager(manager.Manager):
def _set_vm_state_and_notify(self, method, updates, context, ex,
request_spec):
- """changes VM state and notifies"""
+ """changes VM state and notifies."""
# FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
# scheduler manager like this. We should make this easier.
# run_instance only sends a request_spec, and an instance may or may
@@ -219,13 +220,12 @@ class SchedulerManager(manager.Manager):
"""
# Getting compute node info and related instances info
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
+ service_ref = db.service_get_by_compute_host(context, host)
instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
+ service_ref['host'])
# Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
+ compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
@@ -262,3 +262,6 @@ class SchedulerManager(manager.Manager):
@manager.periodic_task
def _expire_reservations(self, context):
QUOTAS.expire(context)
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index 7c68bb12a..a92e09556 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -27,8 +27,6 @@ schedule requests to compute nodes but provide their own manager and topic.
https://bugs.launchpad.net/nova/+bug/1009681
"""
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.scheduler import driver
@@ -44,7 +42,7 @@ multi_scheduler_opts = [
help='Default driver to use for scheduling calls'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(multi_scheduler_opts)
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index cbb6712db..c3a37d6ad 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -18,12 +18,18 @@
Client side of the scheduler manager RPC API.
"""
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
-CONF = config.CONF
+rpcapi_opts = [
+ cfg.StrOpt('scheduler_topic',
+ default='scheduler',
+ help='the topic scheduler nodes listen on'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -47,6 +53,9 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.1 - Add image_id to create_volume()
2.2 - Remove reservations argument to create_volume()
2.3 - Remove create_volume()
+ 2.4 - Change update_service_capabilities()
+ - accepts a list of capabilities
+ 2.5 - Add get_backdoor_port()
'''
#
@@ -77,11 +86,13 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
request_spec, filter_properties, reservations):
instance_p = jsonutils.to_primitive(instance)
instance_type_p = jsonutils.to_primitive(instance_type)
+ reservations_p = jsonutils.to_primitive(reservations)
+ image_p = jsonutils.to_primitive(image)
self.cast(ctxt, self.make_msg('prep_resize',
instance=instance_p, instance_type=instance_type_p,
- image=image, request_spec=request_spec,
+ image=image_p, request_spec=request_spec,
filter_properties=filter_properties,
- reservations=reservations))
+ reservations=reservations_p))
def show_host_resources(self, ctxt, host):
return self.call(ctxt, self.make_msg('show_host_resources', host=host))
@@ -100,4 +111,9 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
service_name=service_name, host=host,
- capabilities=capabilities))
+ capabilities=capabilities),
+ version='2.4')
+
+ def get_backdoor_port(self, context, host):
+ return self.call(context, self.make_msg('get_backdoor_port'),
+ version='2.5')
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index e8be0070b..e0840dd01 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -26,8 +26,6 @@ import datetime
import json
import os
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -38,7 +36,7 @@ scheduler_json_config_location_opt = cfg.StrOpt(
default='',
help='Absolute path to scheduler configuration JSON file.')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(scheduler_json_config_location_opt)
LOG = logging.getLogger(__name__)
diff --git a/nova/scheduler/weights/__init__.py b/nova/scheduler/weights/__init__.py
new file mode 100644
index 000000000..b979b1e55
--- /dev/null
+++ b/nova/scheduler/weights/__init__.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler host weights
+"""
+
+
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.scheduler.weights import least_cost
+from nova import weights
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class WeighedHost(weights.WeighedObject):
+ def to_dict(self):
+ x = dict(weight=self.weight)
+ x['host'] = self.obj.host
+ return x
+
+ def __repr__(self):
+ return "WeighedHost [host: %s, weight: %s]" % (
+ self.obj.host, self.weight)
+
+
+class BaseHostWeigher(weights.BaseWeigher):
+ """Base class for host weights."""
+ pass
+
+
+class HostWeightHandler(weights.BaseWeightHandler):
+ object_class = WeighedHost
+
+ def __init__(self):
+ super(HostWeightHandler, self).__init__(BaseHostWeigher)
+
+
+def all_weighers():
+ """Return a list of weight plugin classes found in this directory."""
+
+ if (CONF.least_cost_functions is not None or
+ CONF.compute_fill_first_cost_fn_weight is not None):
+ LOG.deprecated(_('least_cost has been deprecated in favor of '
+ 'the RAM Weigher.'))
+ return least_cost.get_least_cost_weighers()
+ return HostWeightHandler().get_all_classes()
diff --git a/nova/scheduler/weights/least_cost.py b/nova/scheduler/weights/least_cost.py
new file mode 100644
index 000000000..26b9e7a8c
--- /dev/null
+++ b/nova/scheduler/weights/least_cost.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Least Cost is an algorithm for choosing which host machines to
+provision a set of resources to. The input is a WeightedHost object which
+is decided upon by a set of objective-functions, called the 'cost-functions'.
+The WeightedHost contains a combined weight for each cost-function.
+
+The cost-function and weights are tabulated, and the host with the least cost
+is then selected for provisioning.
+
+NOTE(comstud): This is deprecated. One should use the RAMWeigher and/or
+create other weight modules.
+"""
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+least_cost_opts = [
+ cfg.ListOpt('least_cost_functions',
+ default=None,
+ help='Which cost functions the LeastCostScheduler should use'),
+ cfg.FloatOpt('noop_cost_fn_weight',
+ default=1.0,
+ help='How much weight to give the noop cost function'),
+ cfg.FloatOpt('compute_fill_first_cost_fn_weight',
+ default=None,
+ help='How much weight to give the fill-first cost function. '
+ 'A negative value will reverse behavior: '
+ 'e.g. spread-first'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(least_cost_opts)
+
+
+def noop_cost_fn(host_state, weight_properties):
+ """Return a pre-weight cost of 1 for each host."""
+ return 1
+
+
+def compute_fill_first_cost_fn(host_state, weight_properties):
+ """Higher weights win, so we should return a lower weight
+ when there's more free ram available.
+
+ Note: the weight modifier for this function in default configuration
+ is -1.0. With -1.0 this function runs in reverse, so systems
+ with the most free memory will be preferred.
+ """
+ return -host_state.free_ram_mb
+
+
+def _get_cost_functions():
+ """Returns a list of tuples containing weights and cost functions to
+ use for weighing hosts
+ """
+ cost_fns_conf = CONF.least_cost_functions
+ if cost_fns_conf is None:
+ # The old default. This will get fixed up below.
+ fn_str = 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
+ cost_fns_conf = [fn_str]
+ cost_fns = []
+ for cost_fn_str in cost_fns_conf:
+ short_name = cost_fn_str.split('.')[-1]
+ if not (short_name.startswith('compute_') or
+ short_name.startswith('noop')):
+ continue
+ # Fix up any old paths to the new paths
+ if cost_fn_str.startswith('nova.scheduler.least_cost.'):
+ cost_fn_str = ('nova.scheduler.weights.least_cost' +
+ cost_fn_str[25:])
+ try:
+ # NOTE: import_class is somewhat misnamed since
+ # the weighing function can be any non-class callable
+ # (i.e., no 'self')
+ cost_fn = importutils.import_class(cost_fn_str)
+ except ImportError:
+ raise exception.SchedulerCostFunctionNotFound(
+ cost_fn_str=cost_fn_str)
+
+ try:
+ flag_name = "%s_weight" % cost_fn.__name__
+ weight = getattr(CONF, flag_name)
+ except AttributeError:
+ raise exception.SchedulerWeightFlagNotFound(
+ flag_name=flag_name)
+ # Set the original default.
+ if (flag_name == 'compute_fill_first_cost_fn_weight' and
+ weight is None):
+ weight = -1.0
+ cost_fns.append((weight, cost_fn))
+ return cost_fns
+
+
+def get_least_cost_weighers():
+ cost_functions = _get_cost_functions()
+
+ # Unfortunately we need to import this late so we don't have an
+ # import loop.
+ from nova.scheduler import weights
+
+ class _LeastCostWeigher(weights.BaseHostWeigher):
+ def weigh_objects(self, weighted_hosts, weight_properties):
+ for host in weighted_hosts:
+ host.weight = sum(weight * fn(host.obj, weight_properties)
+ for weight, fn in cost_functions)
+
+ return [_LeastCostWeigher]
diff --git a/nova/scheduler/weights/ram.py b/nova/scheduler/weights/ram.py
new file mode 100644
index 000000000..ea4cfab38
--- /dev/null
+++ b/nova/scheduler/weights/ram.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+RAM Weigher. Weigh hosts by their RAM usage.
+
+The default is to spread instances across all hosts evenly. If you prefer
+stacking, you can set the 'ram_weight_multiplier' option to a negative
+number and the weighing has the opposite effect of the default.
+"""
+
+from nova.openstack.common import cfg
+from nova.scheduler import weights
+
+
+ram_weight_opts = [
+ cfg.FloatOpt('ram_weight_multiplier',
+ default=1.0,
+ help='Multiplier used for weighing ram. Negative '
+ 'numbers mean to stack vs spread.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(ram_weight_opts)
+
+
+class RAMWeigher(weights.BaseHostWeigher):
+ def _weight_multiplier(self):
+ """Override the weight multiplier."""
+ return CONF.ram_weight_multiplier
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want spreading to be the default."""
+ return host_state.free_ram_mb
diff --git a/nova/service.py b/nova/service.py
index 45bdf7ec0..0fde14baa 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -30,35 +30,37 @@ import time
import eventlet
import greenlet
-from nova.common import eventlet_backdoor
-from nova import config
+from nova import conductor
from nova import context
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
+from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
-
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='seconds between nodes reporting state to datastore'),
- cfg.IntOpt('periodic_interval',
- default=60,
- help='seconds between running periodic tasks'),
+ cfg.BoolOpt('periodic_enable',
+ default=True,
+ help='enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
+ cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute', 'metadata'],
+ help='a list of APIs to enable by default'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
@@ -89,10 +91,29 @@ service_opts = [
cfg.IntOpt('metadata_workers',
default=None,
help='Number of workers for metadata service'),
+ cfg.StrOpt('compute_manager',
+ default='nova.compute.manager.ComputeManager',
+ help='full class name for the Manager for compute'),
+ cfg.StrOpt('console_manager',
+ default='nova.console.manager.ConsoleProxyManager',
+ help='full class name for the Manager for console proxy'),
+ cfg.StrOpt('cert_manager',
+ default='nova.cert.manager.CertManager',
+ help='full class name for the Manager for cert'),
+ cfg.StrOpt('network_manager',
+ default='nova.network.manager.VlanManager',
+ help='full class name for the Manager for network'),
+ cfg.StrOpt('scheduler_manager',
+ default='nova.scheduler.manager.SchedulerManager',
+ help='full class name for the Manager for scheduler'),
+ cfg.IntOpt('service_down_time',
+ default=60,
+ help='maximum time since last check-in for up service'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(service_opts)
+CONF.import_opt('host', 'nova.netconf')
class SignalExit(SystemExit):
@@ -111,7 +132,7 @@ class Launcher(object):
"""
self._services = []
- eventlet_backdoor.initialize_if_enabled()
+ self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_server(server):
@@ -131,6 +152,8 @@ class Launcher(object):
:returns: None
"""
+ if self.backdoor_port is not None:
+ server.backdoor_port = self.backdoor_port
gt = eventlet.spawn(self.run_server, server)
self._services.append(gt)
@@ -333,7 +356,7 @@ class ProcessLauncher(object):
return wrap
def wait(self):
- """Loop waiting on children to die and respawning as necessary"""
+ """Loop waiting on children to die and respawning as necessary."""
while self.running:
wrap = self._wait_child()
if not wrap:
@@ -369,7 +392,8 @@ class Service(object):
it state to the database services table."""
def __init__(self, host, binary, topic, manager, report_interval=None,
- periodic_interval=None, periodic_fuzzy_delay=None,
+ periodic_enable=None, periodic_fuzzy_delay=None,
+ periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
@@ -378,32 +402,41 @@ class Service(object):
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
- self.periodic_interval = periodic_interval
+ self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
+ self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
+ self.backdoor_port = None
+ self.db_allowed = db_allowed
+ self.conductor_api = conductor.API(use_local=db_allowed)
+ self.conductor_api.wait_until_ready(context.get_admin_context())
+ self.servicegroup_api = servicegroup.API()
def start(self):
- vcs_string = version.version_string_with_vcs()
- LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
- {'topic': self.topic, 'vcs_string': vcs_string})
+ verstr = version.version_string_with_package()
+ LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
+ {'topic': self.topic, 'version': verstr})
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
- service_ref = db.service_get_by_args(ctxt,
- self.host,
- self.binary)
+ service_ref = self.conductor_api.service_get_by_args(ctxt,
+ self.host,
+ self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
- self.manager.pre_start_hook()
+ if self.backdoor_port is not None:
+ self.manager.backdoor_port = self.backdoor_port
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
+ self.manager.pre_start_hook(rpc_connection=self.conn)
+
rpc_dispatcher = self.manager.create_rpc_dispatcher()
# Share this same connection for these Consumers
@@ -419,32 +452,33 @@ class Service(object):
self.manager.post_start_hook()
- if self.report_interval:
- pulse = utils.LoopingCall(self.report_state)
- pulse.start(interval=self.report_interval,
- initial_delay=self.report_interval)
+ LOG.debug(_("Join ServiceGroup membership for this service %s")
+ % self.topic)
+ # Add service to the ServiceGroup membership group.
+ pulse = self.servicegroup_api.join(self.host, self.topic, self)
+ if pulse:
self.timers.append(pulse)
- if self.periodic_interval:
+ if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
- periodic = utils.LoopingCall(self.periodic_tasks)
- periodic.start(interval=self.periodic_interval,
- initial_delay=initial_delay)
+ periodic = utils.DynamicLoopingCall(self.periodic_tasks)
+ periodic.start(initial_delay=initial_delay,
+ periodic_interval_max=self.periodic_interval_max)
self.timers.append(periodic)
def _create_service_ref(self, context):
- zone = CONF.node_availability_zone
- service_ref = db.service_create(context,
- {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0,
- 'availability_zone': zone})
- self.service_id = service_ref['id']
+ svc_values = {
+ 'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0
+ }
+ service = self.conductor_api.service_create(context, svc_values)
+ self.service_id = service['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
@@ -452,8 +486,9 @@ class Service(object):
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
- report_interval=None, periodic_interval=None,
- periodic_fuzzy_delay=None):
+ report_interval=None, periodic_enable=None,
+ periodic_fuzzy_delay=None, periodic_interval_max=None,
+ db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
@@ -461,8 +496,9 @@ class Service(object):
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
- :param periodic_interval: defaults to CONF.periodic_interval
+ :param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
+ :param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
@@ -472,18 +508,21 @@ class Service(object):
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
- manager = CONF.get('%s_manager' %
- binary.rpartition('nova-')[2], None)
+ manager_cls = ('%s_manager' %
+ binary.rpartition('nova-')[2])
+ manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
- if periodic_interval is None:
- periodic_interval = CONF.periodic_interval
+ if periodic_enable is None:
+ periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
- periodic_interval=periodic_interval,
- periodic_fuzzy_delay=periodic_fuzzy_delay)
+ periodic_enable=periodic_enable,
+ periodic_fuzzy_delay=periodic_fuzzy_delay,
+ periodic_interval_max=periodic_interval_max,
+ db_allowed=db_allowed)
return service_obj
@@ -491,7 +530,8 @@ class Service(object):
"""Destroy the service object in the datastore."""
self.stop()
try:
- db.service_destroy(context.get_admin_context(), self.service_id)
+ self.conductor_api.service_destroy(context.get_admin_context(),
+ self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
@@ -519,39 +559,7 @@ class Service(object):
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
- self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
-
- def report_state(self):
- """Update the state of this service in the datastore."""
- ctxt = context.get_admin_context()
- zone = CONF.node_availability_zone
- state_catalog = {}
- try:
- try:
- service_ref = db.service_get(ctxt, self.service_id)
- except exception.NotFound:
- LOG.debug(_('The service database object disappeared, '
- 'Recreating it.'))
- self._create_service_ref(ctxt)
- service_ref = db.service_get(ctxt, self.service_id)
-
- state_catalog['report_count'] = service_ref['report_count'] + 1
- if zone != service_ref['availability_zone']:
- state_catalog['availability_zone'] = zone
-
- db.service_update(ctxt,
- self.service_id, state_catalog)
-
- # TODO(termie): make this pattern be more elegant.
- if getattr(self, 'model_disconnected', False):
- self.model_disconnected = False
- LOG.error(_('Recovered model server connection!'))
-
- # TODO(vish): this should probably only catch connection errors
- except Exception: # pylint: disable=W0702
- if not getattr(self, 'model_disconnected', False):
- self.model_disconnected = True
- LOG.exception(_('model server went away'))
+ return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
class WSGIService(object):
@@ -578,6 +586,7 @@ class WSGIService(object):
port=self.port)
# Pull back actual port used
self.port = self.server.port
+ self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
@@ -612,6 +621,8 @@ class WSGIService(object):
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
+ if self.backdoor_port is not None:
+ self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
diff --git a/nova/servicegroup/__init__.py b/nova/servicegroup/__init__.py
new file mode 100644
index 000000000..318d30ff7
--- /dev/null
+++ b/nova/servicegroup/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Copyright (c) AT&T Labs Inc. 2012 Yun Mao <yunmao@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The membership service for Nova. Different implementations can be plugged
+according to the Nova configuration.
+"""
+
+from nova.servicegroup.api import API
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
new file mode 100644
index 000000000..ebd0ee6ac
--- /dev/null
+++ b/nova/servicegroup/api.py
@@ -0,0 +1,137 @@
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Alexey Roytman <roytman at il dot ibm dot com>
+# Copyright (c) AT&T Labs Inc. 2012 Yun Mao <yunmao@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define APIs for the servicegroup access."""
+
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
+from nova import utils
+
+from random import choice
+
+
+LOG = logging.getLogger(__name__)
+_default_driver = 'db'
+servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver',
+ default=_default_driver,
+ help='The driver for servicegroup service.')
+
+CONF = cfg.CONF
+CONF.register_opt(servicegroup_driver_opt)
+
+
+class API(object):
+
+ _driver = None
+ _driver_name_class_mapping = {
+ 'db': 'nova.servicegroup.drivers.db.DbDriver'
+ }
+
+ @lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
+ def __new__(cls, *args, **kwargs):
+
+ if not cls._driver:
+ LOG.debug(_('ServiceGroup driver defined as an instance of %s'),
+ str(CONF.servicegroup_driver))
+ driver_name = CONF.servicegroup_driver
+ try:
+ driver_class = cls._driver_name_class_mapping[driver_name]
+ except KeyError:
+ raise TypeError(_("unknown ServiceGroup driver name: %s")
+ % driver_name)
+ cls._driver = importutils.import_object(driver_class)
+ utils.check_isinstance(cls._driver, ServiceGroupDriver)
+ # we don't have to check that cls._driver is not NONE,
+ # check_isinstance does it
+ return super(API, cls).__new__(cls)
+
+ def join(self, member_id, group_id, service=None):
+ """Add a new member to the ServiceGroup
+
+ @param member_id: the joined member ID
+ @param group_id: the group name, of the joined member
+ @param service: the parameter can be used for notifications about
+ disconnect mode and update some internals
+ """
+ msg = _('Join new ServiceGroup member %(member_id)s to the '
+ '%(group_id)s group, service = %(service)s')
+ LOG.debug(msg, locals())
+ return self._driver.join(member_id, group_id, service)
+
+ def service_is_up(self, member):
+ """Check if the given member is up."""
+ msg = _('Check if the given member [%s] is part of the '
+ 'ServiceGroup, is up')
+ LOG.debug(msg, member)
+ return self._driver.is_up(member)
+
+ def leave(self, member_id, group_id):
+ """Explicitly remove the given member from the ServiceGroup
+ monitoring.
+ """
+ msg = _('Explicitly remove the given member %(member_id)s from the'
+ '%(group_id)s group monitoring')
+ LOG.debug(msg, locals())
+ return self._driver.leave(member_id, group_id)
+
+ def get_all(self, group_id):
+ """Returns ALL members of the given group."""
+ LOG.debug(_('Returns ALL members of the [%s] '
+ 'ServiceGroup'), group_id)
+ return self._driver.get_all(group_id)
+
+ def get_one(self, group_id):
+ """Returns one member of the given group. The strategy to select
+ the member is decided by the driver (e.g. random or round-robin).
+ """
+ LOG.debug(_('Returns one member of the [%s] group'), group_id)
+ return self._driver.get_one(group_id)
+
+
+class ServiceGroupDriver(object):
+ """Base class for ServiceGroup drivers."""
+
+ def join(self, member_id, group_id, service=None):
+ """Join the given service with its group."""
+ raise NotImplementedError()
+
+ def is_up(self, member):
+ """Check whether the given member is up."""
+ raise NotImplementedError()
+
+ def leave(self, member_id, group_id):
+ """Remove the given member from the ServiceGroup monitoring."""
+ raise NotImplementedError()
+
+ def get_all(self, group_id):
+ """Returns ALL members of the given group."""
+ raise NotImplementedError()
+
+ def get_one(self, group_id):
+ """The default behavior of get_one is to randomly pick one from
+ the result of get_all(). This is likely to be overridden in the
+ actual driver implementation.
+ """
+ members = self.get_all(group_id)
+ if members is None:
+ return None
+ length = len(members)
+ if length == 0:
+ return None
+ return choice(members)
diff --git a/nova/servicegroup/drivers/__init__.py b/nova/servicegroup/drivers/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/servicegroup/drivers/__init__.py
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
new file mode 100644
index 000000000..075db3ed8
--- /dev/null
+++ b/nova/servicegroup/drivers/db.py
@@ -0,0 +1,101 @@
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova.servicegroup import api
+from nova import utils
+
+
+CONF = cfg.CONF
+CONF.import_opt('service_down_time', 'nova.service')
+
+LOG = logging.getLogger(__name__)
+
+
+class DbDriver(api.ServiceGroupDriver):
+
+ def join(self, member_id, group_id, service=None):
+ """Join the given service with its group."""
+
+ msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to '
+ 'the %(group_id)s group, service = %(service)s')
+ LOG.debug(msg, locals())
+ if service is None:
+ raise RuntimeError(_('service is a mandatory argument for DB based'
+ ' ServiceGroup driver'))
+ report_interval = service.report_interval
+ if report_interval:
+ pulse = utils.FixedIntervalLoopingCall(self._report_state, service)
+ pulse.start(interval=report_interval,
+ initial_delay=report_interval)
+ return pulse
+
+ def is_up(self, service_ref):
+ """Moved from nova.utils
+ Check whether a service is up based on last heartbeat.
+ """
+ last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
+ # Timestamps in DB are UTC.
+ elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat)
+ LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
+ {'lhb': str(last_heartbeat), 'el': str(elapsed)})
+ return abs(elapsed) <= CONF.service_down_time
+
+ def get_all(self, group_id):
+ """
+ Returns ALL members of the given group
+ """
+ LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id)
+ rs = []
+ ctxt = context.get_admin_context()
+ for service in db.service_get_all_by_topic(ctxt, group_id):
+ if self.is_up(service):
+ rs.append(service['host'])
+ return rs
+
+ def _report_state(self, service):
+ """Update the state of this service in the datastore."""
+ ctxt = context.get_admin_context()
+ state_catalog = {}
+ try:
+ try:
+ service_ref = db.service_get(ctxt, service.service_id)
+ except exception.NotFound:
+ LOG.debug(_('The service database object disappeared, '
+ 'Recreating it.'))
+ service._create_service_ref(ctxt)
+ service_ref = db.service_get(ctxt, service.service_id)
+
+ state_catalog['report_count'] = service_ref['report_count'] + 1
+
+ db.service_update(ctxt,
+ service.service_id, state_catalog)
+
+ # TODO(termie): make this pattern be more elegant.
+ if getattr(service, 'model_disconnected', False):
+ service.model_disconnected = False
+ LOG.error(_('Recovered model server connection!'))
+
+ # TODO(vish): this should probably only catch connection errors
+ except Exception: # pylint: disable=W0702
+ if not getattr(service, 'model_disconnected', False):
+ service.model_disconnected = True
+ LOG.exception(_('model server went away'))
diff --git a/nova/test.py b/nova/test.py
index cd82d74e2..b3f851dc4 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -23,206 +23,248 @@ inline callbacks.
"""
-import functools
+import os
+import shutil
import sys
import uuid
+import eventlet
+import fixtures
import mox
-import nose.plugins.skip
import stubout
import testtools
-from nova import config
-from nova import flags
+from nova import context
+from nova import db
+from nova.db import migration
+from nova.db.sqlalchemy import session
+from nova.network import manager as network_manager
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova import paths
from nova import service
-from nova import tests
-from nova.tests import fake_flags
+from nova.tests import conf_fixture
+from nova.tests import policy_fixture
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
- cfg.BoolOpt('fake_tests',
- default=True,
- help='should we use everything for testing'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(test_opts)
+CONF = cfg.CONF
+CONF.register_opts(test_opts)
+CONF.import_opt('sql_connection', 'nova.db.sqlalchemy.session')
+CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
+CONF.set_override('use_stderr', False)
+logging.setup('nova')
LOG = logging.getLogger(__name__)
+eventlet.monkey_patch(os=False)
+
+_DB_CACHE = None
+
+
+class Database(fixtures.Fixture):
+
+ def __init__(self, db_session, db_migrate, sql_connection,
+ sqlite_db, sqlite_clean_db):
+ self.sql_connection = sql_connection
+ self.sqlite_db = sqlite_db
+ self.sqlite_clean_db = sqlite_clean_db
+
+ self.engine = db_session.get_engine()
+ self.engine.dispose()
+ conn = self.engine.connect()
+ if sql_connection == "sqlite://":
+ if db_migrate.db_version() > db_migrate.INIT_VERSION:
+ return
+ else:
+ testdb = paths.state_path_rel(sqlite_db)
+ if os.path.exists(testdb):
+ return
+ db_migrate.db_sync()
+ self.post_migrations()
+ if sql_connection == "sqlite://":
+ conn = self.engine.connect()
+ self._DB = "".join(line for line in conn.connection.iterdump())
+ self.engine.dispose()
+ else:
+ cleandb = paths.state_path_rel(sqlite_clean_db)
+ shutil.copyfile(testdb, cleandb)
-class TestingException(Exception):
- pass
+ def setUp(self):
+ super(Database, self).setUp()
+
+ if self.sql_connection == "sqlite://":
+ conn = self.engine.connect()
+ conn.connection.executescript(self._DB)
+ self.addCleanup(self.engine.dispose)
+ else:
+ shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
+ paths.state_path_rel(self.sqlite_db))
+
+ def post_migrations(self):
+ """Any additional steps that are needed outside of the migrations."""
+ ctxt = context.get_admin_context()
+ network = network_manager.VlanManager()
+ bridge_interface = CONF.flat_interface or CONF.vlan_interface
+ network.create_networks(ctxt,
+ label='test',
+ cidr=CONF.fixed_range,
+ multi_host=CONF.multi_host,
+ num_networks=CONF.num_networks,
+ network_size=CONF.network_size,
+ cidr_v6=CONF.fixed_range_v6,
+ gateway=CONF.gateway,
+ gateway_v6=CONF.gateway_v6,
+ bridge=CONF.flat_network_bridge,
+ bridge_interface=bridge_interface,
+ vpn_start=CONF.vpn_start,
+ vlan_start=CONF.vlan_start,
+ dns1=CONF.flat_network_dns)
+ for net in db.network_get_all(ctxt):
+ network.set_network_host(ctxt, net)
+
+
+class ReplaceModule(fixtures.Fixture):
+ """Replace a module with a fake module."""
+
+ def __init__(self, name, new_value):
+ self.name = name
+ self.new_value = new_value
+
+ def _restore(self, old_value):
+ sys.modules[self.name] = old_value
+ def setUp(self):
+ super(ReplaceModule, self).setUp()
+ old_value = sys.modules.get(self.name)
+ sys.modules[self.name] = self.new_value
+ self.addCleanup(self._restore, old_value)
-class TestCase(testtools.TestCase):
- """Test case base class for all unit tests."""
+
+class ServiceFixture(fixtures.Fixture):
+ """Run a service as a test fixture."""
+
+ def __init__(self, name, host=None, **kwargs):
+ name = name
+ host = host and host or uuid.uuid4().hex
+ kwargs.setdefault('host', host)
+ kwargs.setdefault('binary', 'nova-%s' % name)
+ self.kwargs = kwargs
def setUp(self):
- """Run before each test method to initialize test environment."""
- super(TestCase, self).setUp()
+ super(ServiceFixture, self).setUp()
+ self.service = service.Service.create(**self.kwargs)
+ self.service.start()
+ self.addCleanup(self.service.kill)
- fake_flags.set_defaults(FLAGS)
- config.parse_args([], default_config_files=[])
- # NOTE(vish): We need a better method for creating fixtures for tests
- # now that we have some required db setup for the system
- # to work properly.
- self.start = timeutils.utcnow()
- tests.reset_db()
+class MoxStubout(fixtures.Fixture):
+ """Deal with code around mox and stubout as a fixture."""
+ def setUp(self):
+ super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
- self.injected = []
- self._services = []
- self._modules = {}
+ self.addCleanup(self.mox.UnsetStubs)
+ self.addCleanup(self.stubs.UnsetAll)
+ self.addCleanup(self.stubs.SmartUnsetAll)
+ self.addCleanup(self.mox.VerifyAll)
+
+
+class TestingException(Exception):
+ pass
- def tearDown(self):
- """Runs after each test method to tear down test environment."""
+
+class TestCase(testtools.TestCase):
+ """Test case base class for all unit tests."""
+
+ def setUp(self):
+ """Run before each test method to initialize test environment."""
+ super(TestCase, self).setUp()
+ test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
- self.mox.UnsetStubs()
- self.stubs.UnsetAll()
- self.stubs.SmartUnsetAll()
- self.mox.VerifyAll()
- super(TestCase, self).tearDown()
- finally:
- # Reset any overridden flags
- FLAGS.reset()
-
- # Unstub modules
- for name, mod in self._modules.iteritems():
- if mod is not None:
- sys.modules[name] = mod
- else:
- sys.modules.pop(name)
- self._modules = {}
-
- # Stop any timers
- for x in self.injected:
- try:
- x.stop()
- except AssertionError:
- pass
-
- # Kill any services
- for x in self._services:
- try:
- x.kill()
- except Exception:
- pass
-
- # Delete attributes that don't start with _ so they don't pin
- # memory around unnecessarily for the duration of the test
- # suite
- for key in [k for k in self.__dict__.keys() if k[0] != '_']:
- del self.__dict__[key]
-
- def stub_module(self, name, mod):
- if name not in self._modules:
- self._modules[name] = sys.modules.get(name)
- sys.modules[name] = mod
+ test_timeout = int(test_timeout)
+ except ValueError:
+ # If timeout value is invalid do not set a timeout.
+ test_timeout = 0
+ if test_timeout > 0:
+ self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
+ self.useFixture(fixtures.NestedTempfile())
+ self.useFixture(fixtures.TempHomeDir())
+
+ if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
+ os.environ.get('OS_STDOUT_CAPTURE') == '1'):
+ stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+ if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
+ os.environ.get('OS_STDERR_CAPTURE') == '1'):
+ stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
+ self.log_fixture = self.useFixture(fixtures.FakeLogger('nova'))
+ self.useFixture(conf_fixture.ConfFixture(CONF))
+
+ global _DB_CACHE
+ if not _DB_CACHE:
+ _DB_CACHE = Database(session, migration,
+ sql_connection=CONF.sql_connection,
+ sqlite_db=CONF.sqlite_db,
+ sqlite_clean_db=CONF.sqlite_clean_db)
+ self.useFixture(_DB_CACHE)
+
+ mox_fixture = self.useFixture(MoxStubout())
+ self.mox = mox_fixture.mox
+ self.stubs = mox_fixture.stubs
+ self.addCleanup(self._clear_attrs)
+ self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
+ self.policy = self.useFixture(policy_fixture.PolicyFixture())
+ CONF.set_override('fatal_exception_format_errors', True)
+
+ def _clear_attrs(self):
+ # Delete attributes that don't start with _ so they don't pin
+ # memory around unnecessarily for the duration of the test
+ # suite
+ for key in [k for k in self.__dict__.keys() if k[0] != '_']:
+ del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
+ group = kw.pop('group', None)
for k, v in kw.iteritems():
- FLAGS.set_override(k, v)
+ CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
- host = host and host or uuid.uuid4().hex
- kwargs.setdefault('host', host)
- kwargs.setdefault('binary', 'nova-%s' % name)
- svc = service.Service.create(**kwargs)
- svc.start()
- self._services.append(svc)
- return svc
-
- # Useful assertions
- def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
- """Assert two dicts are equivalent.
-
- This is a 'deep' match in the sense that it handles nested
- dictionaries appropriately.
-
- NOTE:
-
- If you don't care (or don't know) a given value, you can specify
- the string DONTCARE as the value. This will cause that dict-item
- to be skipped.
-
- """
- def raise_assertion(msg):
- d1str = str(d1)
- d2str = str(d2)
- base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
- 'd2: %(d2str)s' % locals())
- raise AssertionError(base_msg)
-
- d1keys = set(d1.keys())
- d2keys = set(d2.keys())
- if d1keys != d2keys:
- d1only = d1keys - d2keys
- d2only = d2keys - d1keys
- raise_assertion('Keys in d1 and not d2: %(d1only)s. '
- 'Keys in d2 and not d1: %(d2only)s' % locals())
-
- for key in d1keys:
- d1value = d1[key]
- d2value = d2[key]
- try:
- error = abs(float(d1value) - float(d2value))
- within_tolerance = error <= tolerance
- except (ValueError, TypeError):
- # If both values aren't convertible to float, just ignore
- # ValueError if arg is a str, TypeError if it's something else
- # (like None)
- within_tolerance = False
-
- if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
- self.assertDictMatch(d1value, d2value)
- elif 'DONTCARE' in (d1value, d2value):
- continue
- elif approx_equal and within_tolerance:
- continue
- elif d1value != d2value:
- raise_assertion("d1['%(key)s']=%(d1value)s != "
- "d2['%(key)s']=%(d2value)s" % locals())
-
- def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
- """Assert a list of dicts are equivalent."""
- def raise_assertion(msg):
- L1str = str(L1)
- L2str = str(L2)
- base_msg = ('List of dictionaries do not match: %(msg)s '
- 'L1: %(L1str)s L2: %(L2str)s' % locals())
- raise AssertionError(base_msg)
-
- L1count = len(L1)
- L2count = len(L2)
- if L1count != L2count:
- raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
- 'len(L2)=%(L2count)d' % locals())
-
- for d1, d2 in zip(L1, L2):
- self.assertDictMatch(d1, d2, approx_equal=approx_equal,
- tolerance=tolerance)
-
- def assertSubDictMatch(self, sub_dict, super_dict):
- """Assert a sub_dict is subset of super_dict."""
- self.assertEqual(True,
- set(sub_dict.keys()).issubset(set(super_dict.keys())))
- for k, sub_value in sub_dict.items():
- super_value = super_dict[k]
- if isinstance(sub_value, dict):
- self.assertSubDictMatch(sub_value, super_value)
- elif 'DONTCARE' in (sub_value, super_value):
- continue
- else:
- self.assertEqual(sub_value, super_value)
+ svc = self.useFixture(ServiceFixture(name, host, **kwargs))
+ return svc.service
+
+
+class APICoverage(object):
+
+ cover_api = None
+
+ def test_api_methods(self):
+ self.assertTrue(self.cover_api is not None)
+ api_methods = [x for x in dir(self.cover_api)
+ if not x.startswith('_')]
+ test_methods = [x[5:] for x in dir(self)
+ if x.startswith('test_')]
+ self.assertThat(
+ test_methods,
+ testtools.matchers.ContainsAll(api_methods))
+
+
+class TimeOverride(fixtures.Fixture):
+ """Fixture to start and remove time override."""
+
+ def setUp(self):
+ super(TimeOverride, self).setUp()
+ timeutils.set_time_override()
+ self.addCleanup(timeutils.clear_time_override)
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index ee2d17afa..7109e000f 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -28,83 +28,3 @@
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)
-import os
-import shutil
-
-from nova.db.sqlalchemy.session import get_engine
-from nova import flags
-from nova.openstack.common import log as logging
-
-import eventlet
-
-
-eventlet.monkey_patch(os=False)
-
-FLAGS = flags.FLAGS
-FLAGS.use_stderr = False
-
-logging.setup('nova')
-
-_DB = None
-
-
-def reset_db():
- if FLAGS.sql_connection == "sqlite://":
- engine = get_engine()
- engine.dispose()
- conn = engine.connect()
- if _DB:
- conn.connection.executescript(_DB)
- else:
- setup()
- else:
- shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
- os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
-
-
-def setup():
- import mox # Fail fast if you don't have mox. Workaround for bug 810424
-
- from nova import context
- from nova import db
- from nova.db import migration
- from nova.network import manager as network_manager
- from nova.tests import fake_flags
- fake_flags.set_defaults(FLAGS)
-
- if FLAGS.sql_connection == "sqlite://":
- if migration.db_version() > migration.INIT_VERSION:
- return
- else:
- testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
- if os.path.exists(testdb):
- return
- migration.db_sync()
- ctxt = context.get_admin_context()
- network = network_manager.VlanManager()
- bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
- network.create_networks(ctxt,
- label='test',
- cidr=FLAGS.fixed_range,
- multi_host=FLAGS.multi_host,
- num_networks=FLAGS.num_networks,
- network_size=FLAGS.network_size,
- cidr_v6=FLAGS.fixed_range_v6,
- gateway=FLAGS.gateway,
- gateway_v6=FLAGS.gateway_v6,
- bridge=FLAGS.flat_network_bridge,
- bridge_interface=bridge_interface,
- vpn_start=FLAGS.vpn_start,
- vlan_start=FLAGS.vlan_start,
- dns1=FLAGS.flat_network_dns)
- for net in db.network_get_all(ctxt):
- network.set_network_host(ctxt, net)
-
- if FLAGS.sql_connection == "sqlite://":
- global _DB
- engine = get_engine()
- conn = engine.connect()
- _DB = "".join(line for line in conn.connection.iterdump())
- else:
- cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
- shutil.copyfile(testdb, cleandb)
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 47f78ab75..d403ba1f0 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -19,6 +19,7 @@
import copy
import tempfile
+import uuid
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
@@ -27,17 +28,20 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+from nova.tests import matchers
from nova import volume
-
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_instance_type', 'nova.compute.instance_types')
+CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def get_fake_cache():
@@ -59,7 +63,7 @@ def get_fake_cache():
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
@@ -116,7 +120,12 @@ class CinderCloudTestCase(test.TestCase):
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
# set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
@@ -127,6 +136,7 @@ class CinderCloudTestCase(test.TestCase):
self.project_id,
is_admin=True)
self.volume_api = volume.API()
+ self.volume_api.reset_fake_api(self.context)
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
@@ -157,7 +167,7 @@ class CinderCloudTestCase(test.TestCase):
name)
def test_describe_volumes(self):
- """Makes sure describe_volumes works and filters results."""
+ # Makes sure describe_volumes works and filters results.
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -198,7 +208,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
- """Makes sure create_volume works when we specify a snapshot."""
+ # Makes sure create_volume works when we specify a snapshot.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -223,7 +233,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume1_id)
def test_describe_snapshots(self):
- """Makes sure describe_snapshots works and filters results."""
+ # Makes sure describe_snapshots works and filters results.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -250,8 +260,56 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
+ def test_format_snapshot_maps_status(self):
+ fake_snapshot = {'status': 'new',
+ 'id': 1,
+ 'volume_id': 1,
+ 'created_at': 1353560191.08117,
+ 'progress': 90,
+ 'project_id': str(uuid.uuid4()),
+ 'volume_size': 10000,
+ 'display_description': 'desc'}
+
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'creating'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'available'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'active'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'deleting'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'deleted'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot), None)
+
+ fake_snapshot['status'] = 'error'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'error')
+
+ fake_snapshot['status'] = 'banana'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'banana')
+
def test_create_snapshot(self):
- """Makes sure create_snapshot works."""
+ # Makes sure create_snapshot works.
availability_zone = 'zone1:host1'
result = self.cloud.describe_snapshots(self.context)
vol1 = self.cloud.create_volume(self.context,
@@ -272,7 +330,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
- """Makes sure delete_snapshot works."""
+ # Makes sure delete_snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -434,18 +492,18 @@ class CinderCloudTestCase(test.TestCase):
result = {}
self.cloud._format_instance_bdm(self.context, inst1['uuid'],
'/dev/sdb1', result)
- self.assertSubDictMatch(
+ self.assertThat(
{'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
- result)
+ matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = {}
self.cloud._format_instance_bdm(self.context, inst2['uuid'],
'/dev/sdc1', result)
- self.assertSubDictMatch(
+ self.assertThat(
{'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
- result)
+ matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
@@ -465,7 +523,7 @@ class CinderCloudTestCase(test.TestCase):
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
- self.assertSubDictMatch(x, y)
+ self.assertThat(x, matchers.IsSubDictOf(y))
found = True
break
self.assertTrue(found)
@@ -477,24 +535,19 @@ class CinderCloudTestCase(test.TestCase):
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = self._assertInstance(inst1['id'])
- self.assertSubDictMatch(self._expected_instance_bdm1, result)
+ self.assertThat(
+ self._expected_instance_bdm1,
+ matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = self._assertInstance(inst2['id'])
- self.assertSubDictMatch(self._expected_instance_bdm2, result)
+ self.assertThat(
+ self._expected_instance_bdm2,
+ matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
- def assertDictListUnorderedMatch(self, L1, L2, key):
- self.assertEqual(len(L1), len(L2))
- for d1 in L1:
- self.assertTrue(key in d1)
- for d2 in L2:
- self.assertTrue(key in d2)
- if d1[key] == d2[key]:
- self.assertDictMatch(d1, d2)
-
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
@@ -621,12 +674,12 @@ class CinderCloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
- def _restart_compute_service(self, periodic_interval=None):
+ def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
- if periodic_interval:
+ if periodic_interval_max:
self.compute = self.start_service(
- 'compute', periodic_interval=periodic_interval)
+ 'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
@@ -654,7 +707,7 @@ class CinderCloudTestCase(test.TestCase):
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
- """Make sure run instance with block device mapping works"""
+ # Make sure run instance with block device mapping works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -665,10 +718,10 @@ class CinderCloudTestCase(test.TestCase):
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
@@ -735,7 +788,7 @@ class CinderCloudTestCase(test.TestCase):
self._restart_compute_service()
def test_stop_with_attached_volume(self):
- """Make sure attach info is reflected to block device mapping"""
+ # Make sure attach info is reflected to block device mapping.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
@@ -748,9 +801,9 @@ class CinderCloudTestCase(test.TestCase):
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
@@ -810,7 +863,7 @@ class CinderCloudTestCase(test.TestCase):
return result['snapshotId']
def test_run_with_snapshot(self):
- """Makes sure run/stop/start instance with snapshot works."""
+ # Makes sure run/stop/start instance with snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -831,7 +884,7 @@ class CinderCloudTestCase(test.TestCase):
snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snap1_uuid,
@@ -883,15 +936,15 @@ class CinderCloudTestCase(test.TestCase):
# self.cloud.delete_snapshot(self.context, snapshot_id)
def test_create_image(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 1978aa50a..562473121 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -25,9 +25,12 @@ import os
import string
import tempfile
+import fixtures
+
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import utils as compute_utils
@@ -35,21 +38,26 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.image import s3
from nova.network import api as network_api
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+from nova.tests import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
-
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_instance_type', 'nova.compute.instance_types')
+CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+
+HOST = "testhost"
def get_fake_cache():
@@ -71,7 +79,7 @@ def get_fake_cache():
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
@@ -94,10 +102,9 @@ def get_instances_with_cached_ips(orig_func, *args, **kwargs):
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volume_api_class='nova.tests.fake_volume.API',
- volumes_dir=vol_tmpdir)
+ volume_api_class='nova.tests.fake_volume.API')
+ self.useFixture(fixtures.FakeLogger('boto'))
def fake_show(meh, context, id):
return {'id': id,
@@ -129,7 +136,12 @@ class CloudTestCase(test.TestCase):
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
# set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
@@ -176,7 +188,7 @@ class CloudTestCase(test.TestCase):
name)
def test_describe_regions(self):
- """Makes sure describe regions runs without raising an exception"""
+ # Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
@@ -184,7 +196,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
- """Makes sure describe addresses runs without raising an exception"""
+ # Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -196,7 +208,7 @@ class CloudTestCase(test.TestCase):
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
- """Makes sure describe specific address works"""
+ # Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
@@ -235,7 +247,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
- """Verifies associate runs cleanly without raising an exception"""
+ # Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -244,6 +256,7 @@ class CloudTestCase(test.TestCase):
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
@@ -301,8 +314,20 @@ class CloudTestCase(test.TestCase):
self.cloud.disassociate_address,
self.context, public_ip=address)
+ def test_disassociate_unassociated_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
def test_describe_security_groups(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
@@ -318,7 +343,7 @@ class CloudTestCase(test.TestCase):
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
@@ -381,7 +406,7 @@ class CloudTestCase(test.TestCase):
def test_security_group_quota_limit(self):
self.flags(quota_security_groups=10)
- for i in range(1, FLAGS.quota_security_groups + 1):
+ for i in range(1, CONF.quota_security_groups + 1):
name = 'test name %i' % i
descript = 'test description %i' % i
create = self.cloud.create_security_group
@@ -644,12 +669,12 @@ class CloudTestCase(test.TestCase):
self.project_id, 'testgrp1')
get_rules = db.security_group_rule_get_by_security_group
- self.assertTrue(get_rules(self.context, group1.id))
+ self.assertTrue(get_rules(self.context, group1['id']))
self.cloud.delete_security_group(self.context, 'testgrp2')
- self.assertFalse(get_rules(self.context, group1.id))
+ self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
- """Ensure that a group can not be deleted if in use by an instance."""
+ # Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
@@ -664,7 +689,7 @@ class CloudTestCase(test.TestCase):
'description': 'Test group'}
group = db.security_group_create(self.context, args)
- db.instance_add_security_group(self.context, inst.uuid, group.id)
+ db.instance_add_security_group(self.context, inst['uuid'], group['id'])
self.assertRaises(exception.InvalidGroup,
self.cloud.delete_security_group,
@@ -675,45 +700,55 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': "zone1"})
+ 'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': "zone2"})
+ 'report_count': 0})
+ # Aggregate based zones
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg.id, 'host1_zones')
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg.id, 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+ self.assertEqual(len(result['availabilityZoneInfo']), 16)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': "zone1"})
+ 'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': "zone2"})
+ 'report_count': 0})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'second_zone'})
+ db.aggregate_host_add(self.context, agg.id, 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
- self.assertEqual(len(result['availabilityZoneInfo']), 13)
+ self.assertEqual(len(result['availabilityZoneInfo']), 15)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_instances(self):
- """Makes sure describe_instances works and filters results."""
+ # Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -733,11 +768,17 @@ class CloudTestCase(test.TestCase):
'hostname': 'server-4321',
'vm_state': 'active'})
comp1 = db.service_create(self.context, {'host': 'host1',
- 'availability_zone': 'zone1',
'topic': "compute"})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg.id, 'host1')
+
comp2 = db.service_create(self.context, {'host': 'host2',
- 'availability_zone': 'zone2',
'topic': "compute"})
+ agg2 = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg2.id, 'host2')
+
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
@@ -759,13 +800,32 @@ class CloudTestCase(test.TestCase):
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertEqual(instance['dnsNameV6'],
'fe80:b33f::a8bb:ccff:fedd:eeff')
+
+ # A filter with even one invalid id should cause an exception to be
+ # raised
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id, '435679'])
+
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
+ def test_describe_instances_all_invalid(self):
+ # Makes sure describe_instances works and filters results.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ instance_id = ec2utils.id_to_ec2_inst_id('435679')
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id])
+
def test_describe_instances_sorting(self):
- """Makes sure describe_instances works and is sorted as expected."""
+ # Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -801,11 +861,9 @@ class CloudTestCase(test.TestCase):
inst3 = db.instance_create(self.context, inst3_kwargs)
comp1 = db.service_create(self.context, {'host': 'host1',
- 'availability_zone': 'zone1',
'topic': "compute"})
comp2 = db.service_create(self.context, {'host': 'host2',
- 'availability_zone': 'zone2',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
@@ -821,7 +879,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
- """Makes sure describe_instances for instanceState works."""
+ # Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
@@ -851,7 +909,7 @@ class CloudTestCase(test.TestCase):
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
- """Makes sure describe_instances w/ no ipv6 works."""
+ # Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -963,7 +1021,7 @@ class CloudTestCase(test.TestCase):
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
- self.assertDictMatch(d1, d2)
+ self.assertThat(d1, matchers.DictMatches(d2))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
@@ -1096,7 +1154,7 @@ class CloudTestCase(test.TestCase):
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
- """test for rootDeviceName and blockDeiceMapping"""
+ # test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
@@ -1283,17 +1341,17 @@ class CloudTestCase(test.TestCase):
'imageType': 'machine',
'description': None}
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
image['properties']['image_location'] = None
expected['imageLocation'] = 'None (name)'
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
image['name'] = None
image['properties']['image_location'] = 'location'
expected['imageLocation'] = 'location'
expected['name'] = 'location'
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
deregister_image = self.cloud.deregister_image
@@ -1330,10 +1388,21 @@ class CloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_instance_type,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['passwordData'], 'fakepass')
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
- instance_type=FLAGS.default_instance_type,
+ instance_type=CONF.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
@@ -1446,7 +1515,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1478,7 +1547,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_availability_zone(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'placement': {'availability_zone': 'fake'},
}
@@ -1514,7 +1583,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_image_state_none(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1533,7 +1602,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_image_state_invalid(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1553,8 +1622,8 @@ class CloudTestCase(test.TestCase):
self.context, **kwargs)
def test_run_instances_image_status_active(self):
- kwargs = {'image_id': FLAGS.default_image,
- 'instance_type': FLAGS.default_instance_type,
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1578,22 +1647,22 @@ class CloudTestCase(test.TestCase):
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
- def _restart_compute_service(self, periodic_interval=None):
+ def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
- if periodic_interval:
+ if periodic_interval_max:
self.compute = self.start_service(
- 'compute', periodic_interval=periodic_interval)
+ 'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
- """Makes sure stop/start instance works"""
+ # Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1622,7 +1691,7 @@ class CloudTestCase(test.TestCase):
def test_start_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1644,7 +1713,7 @@ class CloudTestCase(test.TestCase):
def test_stop_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1663,7 +1732,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1684,7 +1753,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_invalid_instance_id(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1695,7 +1764,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1728,7 +1797,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_two_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
@@ -1753,7 +1822,7 @@ class CloudTestCase(test.TestCase):
def test_reboot_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1791,15 +1860,15 @@ class CloudTestCase(test.TestCase):
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
- """Make sure that CreateImage works"""
+ """Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
@@ -1834,7 +1903,7 @@ class CloudTestCase(test.TestCase):
connection_info='{"foo":"bar"}')
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -1888,11 +1957,11 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
def test_create_image_no_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
@@ -1901,13 +1970,13 @@ class CloudTestCase(test.TestCase):
instance
"""
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
+ self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
@@ -1923,7 +1992,7 @@ class CloudTestCase(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -1995,7 +2064,7 @@ class CloudTestCase(test.TestCase):
]
def test_describe_instance_attribute(self):
- """Make sure that describe_instance_attribute works"""
+ # Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
@@ -2015,20 +2084,12 @@ class CloudTestCase(test.TestCase):
}
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
- def fake_volume_get(ctxt, volume_id, session=None):
- if volume_id == 87654321:
- return {'id': volume_id,
- 'attach_time': '13:56:24',
- 'status': 'in-use'}
- raise exception.VolumeNotFound(volume_id=volume_id)
- self.stubs.Set(db, 'volume_get', fake_volume_get)
-
def fake_get_instance_uuid_by_ec2_id(ctxt, int_id):
if int_id == 305419896:
return 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
raise exception.InstanceNotFound(instance_id=int_id)
self.stubs.Set(db, 'get_instance_uuid_by_ec2_id',
- fake_get_instance_uuid_by_ec2_id)
+ fake_get_instance_uuid_by_ec2_id)
get_attribute = functools.partial(
self.cloud.describe_instance_attribute,
@@ -2082,7 +2143,7 @@ class CloudTestCase(test.TestCase):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior"""
- kwargs.update({'instance_type': FLAGS.default_instance_type,
+ kwargs.update({'instance_type': CONF.default_instance_type,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
@@ -2111,7 +2172,6 @@ class CloudTestCase(test.TestCase):
def fake_show(self, context, id_):
LOG.debug("id_ %s", id_)
- print id_
prop = {}
if id_ == 'ami-3':
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index c9c11d547..4dcdf4e54 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -24,7 +24,7 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
@@ -32,8 +32,9 @@ from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class EC2ValidateTestCase(test.TestCase):
@@ -51,6 +52,8 @@ class EC2ValidateTestCase(test.TestCase):
self.cloud = cloud.CloudController()
# set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
@@ -173,7 +176,7 @@ class EC2ValidateTestCase(test.TestCase):
class EC2TimestampValidationTestCase(test.TestCase):
- """Test case for EC2 request timestamp validation"""
+ """Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
params = {'Timestamp': '2011-04-22T11:29:49Z'}
diff --git a/nova/tests/api/ec2/test_faults.py b/nova/tests/api/ec2/test_faults.py
index e26b8feaf..a3d97566a 100644
--- a/nova/tests/api/ec2/test_faults.py
+++ b/nova/tests/api/ec2/test_faults.py
@@ -22,13 +22,13 @@ class TestFaults(test.TestCase):
"""Tests covering ec2 Fault class."""
def test_fault_exception(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPBadRequest(
explanation='test'))
self.assertTrue(isinstance(fault.wrapped_exc,
webob.exc.HTTPBadRequest))
def test_fault_exception_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
self.assertEquals(fault.wrapped_exc.status_int, 404)
diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/api/ec2/test_middleware.py
index a618817bd..f50689028 100644
--- a/nova/tests/api/ec2/test_middleware.py
+++ b/nova/tests/api/ec2/test_middleware.py
@@ -24,11 +24,11 @@ import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
@webob.dec.wsgify
@@ -62,28 +62,28 @@ class LockoutTestCase(test.TestCase):
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
- timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
- self._send_bad_attempts('test1', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
- timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
- timeutils.advance_time_seconds(FLAGS.lockout_window * 60)
- self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+ timeutils.advance_time_seconds(CONF.lockout_window * 60)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
@@ -116,6 +116,15 @@ class ExecutorTestCase(test.TestCase):
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
+ def test_instance_not_found_none(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=None)
+
+ # NOTE(mikal): we want no exception to be raised here, which was what
+ # was happening in bug/1080406
+ result = self._execute(not_found)
+ self.assertIn('None', self._extract_message(result))
+
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
index cfc7fb86d..1e4adf574 100644
--- a/nova/tests/api/openstack/common.py
+++ b/nova/tests/api/openstack/common.py
@@ -21,7 +21,7 @@ from nova.openstack.common import jsonutils
def webob_factory(url):
- """Factory for removing duplicate webob code from tests"""
+ """Factory for removing duplicate webob code from tests."""
base_url = url
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index c412b2c5c..dfb687cf4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -13,6 +13,7 @@
# under the License.
import datetime
+import uuid
import webob
@@ -22,15 +23,14 @@ from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
INSTANCE = {
"id": 1,
@@ -54,7 +54,9 @@ def fake_compute_api(*args, **kwargs):
def fake_compute_api_raises_invalid_state(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
def fake_compute_api_get(self, context, instance_id):
@@ -88,7 +90,7 @@ class AdminActionsTest(test.TestCase):
def setUp(self):
super(AdminActionsTest, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
- self.UUID = utils.gen_uuid()
+ self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
@@ -124,7 +126,7 @@ class AdminActionsTest(test.TestCase):
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 409)
- self.assertIn("invalid state for '%(_action)s'" % locals(),
+ self.assertIn("Cannot \'%(_action)s\' while instance" % locals(),
res.body)
def test_migrate_live_enabled(self):
@@ -181,7 +183,7 @@ class CreateBackupTests(test.TestCase):
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.backup_stubs = fakes.stub_out_compute_api_backup(self.stubs)
self.app = compute.APIRouter(init_only=('servers',))
- self.uuid = utils.gen_uuid()
+ self.uuid = uuid.uuid4()
def _get_request(self, body):
url = '/fake/servers/%s/action' % self.uuid
@@ -216,7 +218,7 @@ class CreateBackupTests(test.TestCase):
'metadata': {'123': 'asdf'},
},
}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
body['createBackup']['metadata']['foo%i' % num] = "bar"
request = self._get_request(body)
@@ -224,7 +226,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 413)
def test_create_backup_no_name(self):
- """Name is required for backups"""
+ # Name is required for backups.
body = {
'createBackup': {
'backup_type': 'daily',
@@ -237,7 +239,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_no_rotation(self):
- """Rotation is required for backup requests"""
+ # Rotation is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -249,8 +251,24 @@ class CreateBackupTests(test.TestCase):
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
+ def test_create_backup_negative_rotation(self):
+ """Rotation must be greater than or equal to zero
+ for backup requests
+ """
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': -1,
+ },
+ }
+
+ request = self._get_request(body)
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 400)
+
def test_create_backup_no_backup_type(self):
- """Backup Type (daily or weekly) is required for backup requests"""
+ # Backup Type (daily or weekly) is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -269,8 +287,24 @@ class CreateBackupTests(test.TestCase):
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
- def test_create_backup(self):
- """The happy path for creating backups"""
+ def test_create_backup_rotation_is_zero(self):
+ # The happy path for creating backups if rotation is zero.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 0,
+ },
+ }
+
+ request = self._get_request(body)
+ response = request.get_response(self.app)
+
+ self.assertEqual(response.status_int, 202)
+ self.assertFalse('Location' in response.headers)
+
+ def test_create_backup_rotation_is_positive(self):
+ # The happy path for creating backups if rotation is positive.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -282,6 +316,7 @@ class CreateBackupTests(test.TestCase):
request = self._get_request(body)
response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 202)
self.assertTrue(response.headers['Location'])
def test_create_backup_raises_conflict_on_invalid_state(self):
@@ -307,12 +342,12 @@ class ResetStateTests(test.TestCase):
self.exists = True
self.kwargs = None
- self.uuid = utils.gen_uuid()
+ self.uuid = uuid.uuid4()
def fake_get(inst, context, instance_id):
if self.exists:
return dict(id=1, uuid=instance_id, vm_state=vm_states.ACTIVE)
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id=instance_id)
def fake_update(inst, context, instance, **kwargs):
self.kwargs = kwargs
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
new file mode 100644
index 000000000..b8f4e6398
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute admin api w/ Cells
+"""
+
+from nova.api.openstack.compute.contrib import admin_actions
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import vm_states
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+LOG = logging.getLogger('nova.tests.test_compute_cells')
+
+INSTANCE_IDS = {'inst_id': 1}
+
+
+class CellsAdminAPITestCase(test.TestCase):
+
+ def setUp(self):
+ super(CellsAdminAPITestCase, self).setUp()
+
+ def _fake_cell_read_only(*args, **kwargs):
+ return False
+
+ def _fake_validate_cell(*args, **kwargs):
+ return
+
+ def _fake_compute_api_get(context, instance_id):
+ return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None, 'cell_name': None}
+
+ def _fake_instance_update_and_get_original(context, instance_uuid,
+ values):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ return (inst, inst)
+
+ def fake_cast_to_cells(context, instance, method, *args, **kwargs):
+ """
+            Makes sure that the cells receive the cast to update
+ the cell state
+ """
+ self.cells_recieved_kwargs.update(kwargs)
+
+ self.admin_api = admin_actions.AdminActionsController()
+ self.admin_api.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.stubs.Set(self.admin_api.compute_api, '_cell_read_only',
+ _fake_cell_read_only)
+ self.stubs.Set(self.admin_api.compute_api, '_validate_cell',
+ _fake_validate_cell)
+ self.stubs.Set(self.admin_api.compute_api, 'get',
+ _fake_compute_api_get)
+ self.stubs.Set(self.admin_api.compute_api.db,
+ 'instance_update_and_get_original',
+ _fake_instance_update_and_get_original)
+ self.stubs.Set(self.admin_api.compute_api, '_cast_to_cells',
+ fake_cast_to_cells)
+
+ self.uuid = uuidutils.generate_uuid()
+ url = '/fake/servers/%s/action' % self.uuid
+ self.request = fakes.HTTPRequest.blank(url)
+ self.cells_recieved_kwargs = {}
+
+ def test_reset_active(self):
+ body = {"os-resetState": {"state": "error"}}
+ result = self.admin_api._reset_state(self.request, 'inst_id', body)
+
+ self.assertEqual(result.status_int, 202)
+        # Make sure the cells received the update
+ self.assertEqual(self.cells_recieved_kwargs,
+ dict(vm_state=vm_states.ERROR,
+ task_state=None))
diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py
new file mode 100644
index 000000000..60659b3c6
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_agents.py
@@ -0,0 +1,185 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.api.openstack.compute.contrib import agents
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import test
+
+fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'xxx://xxxx/xxx/xxx1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'id': 2},
+ {'hypervisor': 'xen', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'xxx://xxxx/xxx/xxx2',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f547',
+ 'id': 3},
+ {'hypervisor': 'xen', 'os': 'win',
+ 'architecture': 'power',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx3',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f548',
+ 'id': 4},
+ ]
+
+
+def fake_agent_build_get_all(context, hypervisor):
+ agent_build_all = []
+ for agent in fake_agents_list:
+ if hypervisor and hypervisor != agent['hypervisor']:
+ continue
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(agent)
+ agent_build_all.append(agent_build_ref)
+ return agent_build_all
+
+
+def fake_agent_build_update(context, agent_build_id, values):
+ pass
+
+
+def fake_agent_build_destroy(context, agent_update_id):
+ pass
+
+
+def fake_agent_build_create(context, values):
+ values['id'] = 1
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ return agent_build_ref
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithHypervisor(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {'hypervisor': 'kvm'}
+
+
+class AgentsTest(test.TestCase):
+
+ def setUp(self):
+ super(AgentsTest, self).setUp()
+
+ self.stubs.Set(db, "agent_build_get_all",
+ fake_agent_build_get_all)
+ self.stubs.Set(db, "agent_build_update",
+ fake_agent_build_update)
+ self.stubs.Set(db, "agent_build_destroy",
+ fake_agent_build_destroy)
+ self.stubs.Set(db, "agent_build_create",
+ fake_agent_build_create)
+ self.context = context.get_admin_context()
+ self.controller = agents.AgentController()
+
+ def tearDown(self):
+ super(AgentsTest, self).tearDown()
+
+ def test_agents_create(self):
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ response = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1}}
+ res_dict = self.controller.create(req, body)
+ self.assertEqual(res_dict, response)
+
+ def test_agents_delete(self):
+ req = FakeRequest()
+ self.controller.delete(req, 1)
+
+ def test_agents_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ agents_list = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'xxx://xxxx/xxx/xxx1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'agent_id': 2},
+ {'hypervisor': 'xen', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'xxx://xxxx/xxx/xxx2',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f547',
+ 'agent_id': 3},
+ {'hypervisor': 'xen', 'os': 'win',
+ 'architecture': 'power',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx3',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f548',
+ 'agent_id': 4},
+ ]
+ self.assertEqual(res_dict, {'agents': agents_list})
+
+ def test_agents_list_with_hypervisor(self):
+ req = FakeRequestWithHypervisor()
+ res_dict = self.controller.index(req)
+ response = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'xxx://xxxx/xxx/xxx1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'agent_id': 2},
+ ]
+ self.assertEqual(res_dict, {'agents': response})
+
+ def test_agents_update(self):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ response = {'agent': {'agent_id': 1,
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ res_dict = self.controller.update(req, 1, body)
+ self.assertEqual(res_dict, response)
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index 4fa68bd7d..c57d6a91b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -22,6 +22,7 @@ from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import matchers
LOG = logging.getLogger(__name__)
AGGREGATE_LIST = [
@@ -77,7 +78,7 @@ class AggregateTestCase(test.TestCase):
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
- raise exception.AggregateNameExists
+ raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
@@ -88,11 +89,15 @@ class AggregateTestCase(test.TestCase):
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
- raise exception.InvalidAggregateAction
+ raise exception.InvalidAggregateAction(action='create_aggregate',
+ aggregate_id="'N/A'",
+ reason='invalid zone')
+
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
- self.assertRaises(exc.HTTPConflict, self.controller.create,
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
@@ -118,7 +123,7 @@ class AggregateTestCase(test.TestCase):
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
- availablity_zone="nova1",
+ availability_zone="nova1",
foo='bar'))
def test_show(self):
@@ -178,9 +183,7 @@ class AggregateTestCase(test.TestCase):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
-
result = self.controller.update(self.req, "1", body=body)
-
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
@@ -227,7 +230,8 @@ class AggregateTestCase(test.TestCase):
def test_add_host_with_already_added_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.AggregateHostExists()
+ raise exception.AggregateHostExists(aggregate_id=aggregate,
+ host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
@@ -237,7 +241,7 @@ class AggregateTestCase(test.TestCase):
def test_add_host_with_bad_aggregate(self):
def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.AggregateNotFound()
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
@@ -247,7 +251,7 @@ class AggregateTestCase(test.TestCase):
def test_add_host_with_bad_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.ComputeHostNotFound()
+ raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
@@ -255,16 +259,6 @@ class AggregateTestCase(test.TestCase):
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
- def test_add_host_with_host_in_wrong_availability_zone(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.InvalidAggregateAction()
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- self.assertRaises(exc.HTTPConflict, self.controller.action,
- self.req, "bogus_aggregate",
- body={"add_host": {"host": "host1"}})
-
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
@@ -285,7 +279,7 @@ class AggregateTestCase(test.TestCase):
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
- raise exception.AggregateNotFound()
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
@@ -296,7 +290,8 @@ class AggregateTestCase(test.TestCase):
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
- raise exception.AggregateHostNotFound()
+ raise exception.AggregateHostNotFound(aggregate_id=aggregate,
+ host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
@@ -319,7 +314,8 @@ class AggregateTestCase(test.TestCase):
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
- self.assertDictMatch(body["set_metadata"]['metadata'], values)
+ self.assertThat(body["set_metadata"]['metadata'],
+ matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
@@ -333,7 +329,7 @@ class AggregateTestCase(test.TestCase):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
- raise exception.AggregateNotFound()
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
@@ -364,7 +360,7 @@ class AggregateTestCase(test.TestCase):
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
- raise exception.AggregateNotFound()
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py
new file mode 100644
index 000000000..82d469524
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_cells.py
@@ -0,0 +1,396 @@
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cells as cells_ext
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+FAKE_CELLS = [
+ dict(id=1, name='cell1', username='bob', is_parent=True,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r1.example.org', password='xxxx'),
+ dict(id=2, name='cell2', username='alice', is_parent=False,
+ weight_scale=1.0, weight_offset=0.0,
+ rpc_host='r2.example.org', password='qwerty')]
+
+
+FAKE_CAPABILITIES = [
+ {'cap1': '0,1', 'cap2': '2,3'},
+ {'cap3': '4,5', 'cap4': '5,6'}]
+
+
+def fake_db_cell_get(context, cell_name):
+ for cell in FAKE_CELLS:
+ if cell_name == cell['name']:
+ return cell
+ else:
+ raise exception.CellNotFound(cell_name=cell_name)
+
+
+def fake_db_cell_create(context, values):
+ cell = dict(id=1)
+ cell.update(values)
+ return cell
+
+
+def fake_db_cell_update(context, cell_id, values):
+ cell = fake_db_cell_get(context, cell_id)
+ cell.update(values)
+ return cell
+
+
+def fake_cells_api_get_all_cell_info(*args):
+ cells = copy.deepcopy(FAKE_CELLS)
+ del cells[0]['password']
+ del cells[1]['password']
+ for i, cell in enumerate(cells):
+ cell['capabilities'] = FAKE_CAPABILITIES[i]
+ return cells
+
+
+def fake_db_cell_get_all(context):
+ return FAKE_CELLS
+
+
+class CellsTest(test.TestCase):
+ def setUp(self):
+ super(CellsTest, self).setUp()
+ self.stubs.Set(db, 'cell_get', fake_db_cell_get)
+ self.stubs.Set(db, 'cell_get_all', fake_db_cell_get_all)
+ self.stubs.Set(db, 'cell_update', fake_db_cell_update)
+ self.stubs.Set(db, 'cell_create', fake_db_cell_create)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
+ fake_cells_api_get_all_cell_info)
+
+ self.controller = cells_ext.Controller()
+ self.context = context.get_admin_context()
+
+ def _get_request(self, resource):
+ return fakes.HTTPRequest.blank('/v2/fake/' + resource)
+
+ def test_index(self):
+ req = self._get_request("cells")
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+ self.assertNotIn('capabilities', cell)
+ self.assertNotIn('password', cell)
+
+ def test_detail(self):
+ req = self._get_request("cells/detail")
+ res_dict = self.controller.detail(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], FAKE_CELLS[i]['name'])
+ self.assertEqual(cell['capabilities'], FAKE_CAPABILITIES[i])
+ self.assertNotIn('password', cell)
+
+ def test_show_bogus_cell_raises(self):
+ req = self._get_request("cells/bogus")
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
+
+ def test_get_cell_by_name(self):
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.show(req, 'cell1')
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertNotIn('password', cell)
+
+ def test_cell_delete(self):
+ call_info = {'delete_called': 0}
+
+ def fake_db_cell_delete(context, cell_name):
+ self.assertEqual(cell_name, 'cell999')
+ call_info['delete_called'] += 1
+
+ self.stubs.Set(db, 'cell_delete', fake_db_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ self.controller.delete(req, 'cell999')
+ self.assertEqual(call_info['delete_called'], 1)
+
+ def test_delete_bogus_cell_raises(self):
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
+ 'cell999')
+
+ def test_cell_create_parent(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent',
+ # Also test this is ignored/stripped
+ 'is_parent': False}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'parent')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_child(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'child'}}
+
+ req = self._get_request("cells")
+ res_dict = self.controller.create(req, body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'child')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_no_name_raises(self):
+ body = {'cell': {'username': 'moocow',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_empty_string_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_bang_raises(self):
+ body = {'cell': {'name': 'moo!cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_create_name_with_invalid_type_raises(self):
+ body = {'cell': {'name': 'moocow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'invalid'}}
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_cell_update(self):
+ body = {'cell': {'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.update(req, 'cell1', body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], FAKE_CELLS[0]['rpc_host'])
+ self.assertEqual(cell['username'], 'zeb')
+ self.assertNotIn('password', cell)
+
+ def test_cell_update_empty_name_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_update_invalid_type_raises(self):
+ body = {'cell': {'username': 'zeb',
+ 'type': 'invalid',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.update, req, 'cell1', body)
+
+ def test_cell_info(self):
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(name='darksecret', capabilities=caps, group='cells')
+
+ req = self._get_request("cells/info")
+ res_dict = self.controller.info(req)
+ cell = res_dict['cell']
+ cell_caps = cell['capabilities']
+
+ self.assertEqual(cell['name'], 'darksecret')
+ self.assertEqual(cell_caps['cap1'], 'a;b')
+ self.assertEqual(cell_caps['cap2'], 'c;d')
+
+ def test_sync_instances(self):
+ call_info = {}
+
+ def sync_instances(self, context, **kwargs):
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ body = {}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], None)
+
+ body = {'project_id': 'test-project'}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], 'test-project')
+ self.assertEqual(call_info['updated_since'], None)
+
+ expected = timeutils.utcnow().isoformat()
+ if not expected.endswith("+00:00"):
+ expected += "+00:00"
+
+ body = {'updated_since': expected}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], expected)
+
+ body = {'updated_since': 'skjdfkjsdkf'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'foo': 'meow'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.sync_instances, req, body=body)
+
+
+class TestCellsXMLSerializer(test.TestCase):
+ def test_multiple_cells(self):
+ fixture = {'cells': fake_cells_api_get_all_cell_info()}
+
+ serializer = cells_ext.CellsTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+
+ def test_single_cell_with_caps(self):
+ cell = {'id': 1,
+ 'name': 'darksecret',
+ 'username': 'meow',
+ 'capabilities': {'cap1': 'a;b',
+ 'cap2': 'c;d'}}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'meow')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 1)
+
+ child = res_tree[0]
+ self.assertEqual(child.tag,
+ '{%s}capabilities' % xmlutil.XMLNS_V10)
+ for elem in child:
+ self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10))
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
+
+ def test_single_cell_without_caps(self):
+ cell = {'id': 1,
+ 'username': 'woof',
+ 'name': 'darksecret'}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'woof')
+ self.assertEqual(res_tree.get('password'), None)
+ self.assertEqual(len(res_tree), 0)
+
+
+class TestCellsXMLDeserializer(test.TestCase):
+ def test_cell_deserializer(self):
+ caps_dict = {'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ caps_xml = ("<capabilities><cap1>a;b</cap1>"
+ "<cap2>c;d</cap2></capabilities>")
+ expected = {'cell': {'name': 'testcell1',
+ 'type': 'child',
+ 'rpc_host': 'localhost',
+ 'capabilities': caps_dict}}
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ "<cell><name>testcell1</name><type>child</type>"
+ "<rpc_host>localhost</rpc_host>"
+ "%s</cell>") % caps_xml
+ deserializer = cells_ext.CellDeserializer()
+ result = deserializer.deserialize(intext)
+ self.assertEqual(dict(body=expected), result)
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 97b78f81e..133554abd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -19,30 +19,31 @@ from nova.api.openstack.compute.contrib import cloudpipe
from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
from nova import db
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
+from nova.tests import matchers
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
def fake_vpn_instance():
return {
- 'id': 7, 'image_ref': FLAGS.vpn_image_id, 'vm_state': 'active',
+ 'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': 7777, 'project_id': 'other',
}
-def compute_api_get_all_empty(context):
+def compute_api_get_all_empty(context, search_opts=None):
return []
-def compute_api_get_all(context):
+def compute_api_get_all(context, search_opts=None):
return [fake_vpn_instance()]
@@ -108,7 +109,7 @@ class CloudpipeTest(test.TestCase):
'state': 'running',
'instance_id': 7777,
'created_at': '1981-10-20T00:00:00Z'}]}
- self.assertDictMatch(res_dict, response)
+ self.assertThat(res_dict, matchers.DictMatches(response))
def test_cloudpipe_create(self):
def launch_vpn_instance(context):
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
new file mode 100644
index 000000000..8068776de
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
@@ -0,0 +1,72 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import cloudpipe_update
+from nova import db
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.tests import fake_network
+
+
+fake_networks = [fake_network.fake_network(1),
+ fake_network.fake_network(2)]
+
+
+def fake_project_get_networks(context, project_id, associate=True):
+ return fake_networks
+
+
+def fake_network_update(context, network_id, values):
+ for network in fake_networks:
+ if network['id'] == network_id:
+ for key in values:
+ network[key] = values[key]
+
+
+class CloudpipeUpdateTest(test.TestCase):
+
+ def setUp(self):
+ super(CloudpipeUpdateTest, self).setUp()
+ self.controller = cloudpipe_update.CloudpipeUpdateController()
+ self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
+ self.stubs.Set(db, "network_update", fake_network_update)
+
+ def test_cloudpipe_configure_project(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
+ result = self.controller.update(req, 'configure-project',
+ body=body)
+ self.assertEqual('202 Accepted', result.status)
+ self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
+ self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
+
+ def test_cloudpipe_configure_project_bad_url(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-projectx')
+ body = {"vpn_ip": "1.2.3.4", "vpn_port": 222}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req,
+ 'configure-projectx', body)
+
+ def test_cloudpipe_configure_project_bad_data(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update, req,
+ 'configure-project', body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py b/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py
new file mode 100644
index 000000000..39a883049
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_coverage_ext.py
@@ -0,0 +1,190 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import telnetlib
+
+from coverage import coverage
+import webob
+
+from nova.api.openstack.compute.contrib import coverage_ext
+from nova import context
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+def fake_telnet(self, data):
+ return
+
+
+def fake_check_coverage(self):
+ return False
+
+
+def fake_xml_report(self, outfile):
+ return
+
+
+def fake_report(self, file):
+ return
+
+
+class CoverageExtensionTest(test.TestCase):
+
+ def setUp(self):
+ super(CoverageExtensionTest, self).setUp()
+ self.stubs.Set(telnetlib.Telnet, 'write', fake_telnet)
+ self.stubs.Set(telnetlib.Telnet, 'expect', fake_telnet)
+ self.stubs.Set(coverage, 'report', fake_report)
+ self.stubs.Set(coverage, 'xml_report', fake_xml_report)
+ self.admin_context = context.RequestContext('fakeadmin_0',
+ 'fake',
+ is_admin=True)
+ self.user_context = context.RequestContext('fakeadmin_0',
+ 'fake',
+ is_admin=False)
+
+ def test_not_admin(self):
+ body = {'start': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+ self.assertEqual(res.status_int, 403)
+
+ def test_start_coverage_action(self):
+ body = {'start': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+
+ def test_stop_coverage_action(self):
+ self.stubs.Set(coverage_ext.CoverageController,
+ '_check_coverage', fake_check_coverage)
+ body = {'stop': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ resp_dict = jsonutils.loads(res.body)
+ self.assertTrue('path' in resp_dict)
+
+ def test_report_coverage_action_file(self):
+ self.stubs.Set(coverage_ext.CoverageController,
+ '_check_coverage', fake_check_coverage)
+ self.test_start_coverage_action()
+ body = {
+ 'report': {
+ 'file': 'coverage-unit-test.report',
+ },
+ }
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ resp_dict = jsonutils.loads(res.body)
+ self.assertTrue('path' in resp_dict)
+ self.assertTrue('coverage-unit-test.report' in resp_dict['path'])
+
+ def test_report_coverage_action_xml_file(self):
+ self.stubs.Set(coverage_ext.CoverageController,
+ '_check_coverage', fake_check_coverage)
+ body = {
+ 'report': {
+ 'file': 'coverage-xml-unit-test.report',
+ 'xml': 'True',
+ },
+ }
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ resp_dict = jsonutils.loads(res.body)
+ self.assertTrue('path' in resp_dict)
+ self.assertTrue('coverage-xml-unit-test.report' in resp_dict['path'])
+
+ def test_report_coverage_action_nofile(self):
+ self.stubs.Set(coverage_ext.CoverageController,
+ '_check_coverage', fake_check_coverage)
+ body = {'report': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 400)
+
+ def test_coverage_bad_body(self):
+ body = {}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 400)
+
+ def test_coverage_report_bad_path(self):
+ self.stubs.Set(coverage_ext.CoverageController,
+ '_check_coverage', fake_check_coverage)
+ body = {
+ 'report': {
+ 'file': '/tmp/coverage-xml-unit-test.report',
+ }
+ }
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 400)
+
+ def test_stop_coverage_action_nostart(self):
+ body = {'stop': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 404)
+
+ def test_report_coverage_action_nostart(self):
+ body = {'report': {}}
+ req = webob.Request.blank('/v2/fake/os-coverage/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
index 74af62e60..9ec866172 100644
--- a/nova/tests/api/openstack/compute/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
@@ -23,14 +23,10 @@ import webob
from nova.compute import api as compute_api
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
FAKE_UUID = fakes.FAKE_UUID
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
@@ -103,7 +99,7 @@ class CreateserverextTest(test.TestCase):
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Createserverext', 'User_data',
- 'Security_groups', 'Networks'])
+ 'Security_groups', 'Os_networks'])
def _make_stub_method(self, canned_return):
def stub_method(*args, **kwargs):
diff --git a/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py b/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py
index e7da8f191..eba4154e2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py
+++ b/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py
@@ -61,9 +61,13 @@ class DeferredDeleteExtensionTest(test.TestCase):
compute_api.API.get(self.fake_context, self.fake_uuid).AndReturn(
fake_instance)
+
+ exc = exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
compute_api.API.force_delete(self.fake_context, fake_instance)\
- .AndRaise(
- exception.InstanceInvalidState)
+ .AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPConflict,
@@ -90,11 +94,14 @@ class DeferredDeleteExtensionTest(test.TestCase):
self.mox.StubOutWithMock(compute_api.API, 'restore')
fake_instance = 'fake_instance'
+ exc = exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
compute_api.API.get(self.fake_context, self.fake_uuid).AndReturn(
fake_instance)
compute_api.API.restore(self.fake_context, fake_instance).AndRaise(
- exception.InstanceInvalidState)
+ exc)
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPConflict, self.extension._restore,
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 9206899d6..9434ba821 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -19,7 +19,6 @@ import datetime
from nova.api.openstack import compute
import nova.db.api
-from nova import flags
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc
from nova import test
@@ -31,8 +30,6 @@ MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
stub_instance = fakes.stub_instance
-FLAGS = flags.FLAGS
-
API_DISK_CONFIG = 'OS-DCF:diskConfig'
@@ -250,7 +247,7 @@ class DiskConfigTestCase(test.TestCase):
self.assertDiskConfig(server_dict, 'AUTO')
def test_update_server_invalid_disk_config(self):
- """Return BadRequest if user passes an invalid diskConfig value."""
+ # Return BadRequest if user passes an invalid diskConfig value.
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
index 31e40d7ca..63e1b6126 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
@@ -20,15 +20,10 @@ from nova.api.openstack.compute.contrib import extended_server_attributes
from nova import compute
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
@@ -105,7 +100,7 @@ class ExtendedServerAttributesTest(test.TestCase):
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
index 3ca4000e5..e368c5986 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_status.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
@@ -19,15 +19,10 @@ import webob
from nova.api.openstack.compute.contrib import extended_status
from nova import compute
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
@@ -103,7 +98,7 @@ class ExtendedStatusTest(test.TestCase):
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
diff --git a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
new file mode 100644
index 000000000..6b6fb8286
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
@@ -0,0 +1,163 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import fixed_ips
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ ]
+
+
+def fake_fixed_ip_get_by_address(context, address):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, FakeModel(network), None)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+
+class FakeModel(object):
+ """Stubs out for model."""
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return '<FakeModel: %s>' % self.values
+
+
+def fake_network_get_all(context):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ return [FakeModel(network)]
+
+
+class FixedIpTest(test.TestCase):
+
+ def setUp(self):
+ super(FixedIpTest, self).setUp()
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ self.context = context.get_admin_context()
+ self.controller = fixed_ips.FixedIPController()
+
+ def tearDown(self):
+ super(FixedIpTest, self).tearDown()
+
+ def test_fixed_ips_get(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/192.168.1.1')
+ res_dict = self.controller.show(req, '192.168.1.1')
+ response = {'fixed_ip': {'cidr': '192.168.1.0/24',
+ 'hostname': None,
+ 'host': None,
+ 'address': '192.168.1.1'}}
+ self.assertEqual(response, res_dict)
+
+ def test_fixed_ips_get_fail(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
+ '10.0.0.1')
+
+ def test_fixed_ip_reserve(self):
+ fake_fixed_ips[0]['reserved'] = False
+ ip_addr = '192.168.1.1'
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/192.168.1.1/action')
+ result = self.controller.action(req, "192.168.1.1", body)
+
+ self.assertEqual('202 Accepted', result.status)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], True)
+
+ def test_fixed_ip_reserve_bad_ip(self):
+ ip_addr = '10.0.0.1'
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.1/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.1', body)
+
+ def test_fixed_ip_unreserve(self):
+ fake_fixed_ips[0]['reserved'] = True
+ ip_addr = '192.168.1.1'
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/192.168.1.1/action')
+ result = self.controller.action(req, "192.168.1.1", body)
+
+ self.assertEqual('202 Accepted', result.status)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], False)
+
+ def test_fixed_ip_unreserve_bad_ip(self):
+ ip_addr = '10.0.0.1'
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.1/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.1', body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
index 0bf1f1b66..0818dfdd3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
@@ -226,7 +226,8 @@ class FlavorAccessTest(test.TestCase):
def test_add_tenant_access_with_already_added_access(self):
def stub_add_instance_type_access(flavorid, projectid, ctxt=None):
- raise exception.FlavorAccessExists()
+ raise exception.FlavorAccessExists(flavor_id=flavorid,
+ project_id=projectid)
self.stubs.Set(instance_types, 'add_instance_type_access',
stub_add_instance_type_access)
body = {'addTenantAccess': {'tenant': 'proj2'}}
@@ -238,22 +239,8 @@ class FlavorAccessTest(test.TestCase):
def test_remove_tenant_access_with_bad_access(self):
def stub_remove_instance_type_access(flavorid, projectid, ctxt=None):
- self.assertEqual('3', flavorid, "flavorid")
- self.assertEqual("proj2", projectid, "projectid")
- expected = {'flavor_access': [
- {'flavor_id': '3', 'tenant_id': 'proj3'}]}
- self.stubs.Set(instance_types, 'remove_instance_type_access',
- stub_remove_instance_type_access)
- body = {'removeTenantAccess': {'tenant': 'proj2'}}
- req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
- use_admin_context=True)
- result = self.flavor_action_controller.\
- _addTenantAccess(req, '3', body)
- self.assertEqual(result, expected)
-
- def test_remove_tenant_access_with_bad_access(self):
- def stub_remove_instance_type_access(flavorid, projectid, ctxt=None):
- raise exception.FlavorAccessNotFound()
+ raise exception.FlavorAccessNotFound(flavor_id=flavorid,
+ project_id=projectid)
self.stubs.Set(instance_types, 'remove_instance_type_access',
stub_remove_instance_type_access)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
index 1225b56b9..5d15264ab 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
@@ -17,15 +17,10 @@ import webob
from nova.api.openstack.compute.contrib import flavor_disabled
from nova.compute import instance_types
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
index 3df9f956b..9b58e7b74 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
@@ -208,7 +208,7 @@ class FlavorManageTest(test.TestCase):
def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
flavorid, swap, rxtx_factor, is_public):
- raise exception.InstanceTypeExists()
+ raise exception.InstanceTypeExists(name=name)
self.stubs.Set(instance_types, "create", fake_create)
url = '/v2/fake/flavors'
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
index 52163c700..d86f750cf 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
@@ -16,15 +16,10 @@ from lxml import etree
import webob
from nova.compute import instance_types
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
index 75e9cd76b..eeb0fe632 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
@@ -16,15 +16,10 @@ from lxml import etree
import webob
from nova.compute import instance_types
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py
new file mode 100644
index 000000000..408b17d48
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py
@@ -0,0 +1,125 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ips_bulk
+from nova import context
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.api.openstack import fakes
+
+CONF = cfg.CONF
+
+
+class FloatingIPBulk(test.TestCase):
+
+ def setUp(self):
+ super(FloatingIPBulk, self).setUp()
+
+ self.context = context.get_admin_context()
+ self.controller = floating_ips_bulk.FloatingIPBulkController()
+
+ def tearDown(self):
+ super(FloatingIPBulk, self).tearDown()
+
+ def _setup_floating_ips(self, ip_range):
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk')
+ res_dict = self.controller.create(req, body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_create_ips(self):
+ ip_range = '192.168.1.0/24'
+ self._setup_floating_ips(ip_range)
+
+ def test_create_ips_pool(self):
+ ip_range = '10.0.1.0/20'
+ pool = 'a new pool'
+ body = {'floating_ips_bulk_create':
+ {'ip_range': ip_range,
+ 'pool': pool}}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk')
+ res_dict = self.controller.create(req, body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_list_ips(self):
+ ip_range = '192.168.1.1/28'
+ self._setup_floating_ips(ip_range)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk',
+ use_admin_context=True)
+ res_dict = self.controller.index(req)
+
+ ip_info = [{'address': str(ip_addr),
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface,
+ 'project_id': None,
+ 'instance_uuid': None}
+ for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
+ response = {'floating_ip_info': ip_info}
+
+ self.assertEqual(res_dict, response)
+
+ def test_delete_ips(self):
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ body = {'ip_range': ip_range}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/delete')
+ res_dict = self.controller.update(req, "delete", body)
+
+ response = {"floating_ips_bulk_delete": ip_range}
+ self.assertEqual(res_dict, response)
+
+ # Check that the IPs are actually deleted
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk',
+ use_admin_context=True)
+ res_dict = self.controller.index(req)
+ response = {'floating_ip_info': []}
+ self.assertEqual(res_dict, response)
+
+ def test_create_duplicate_fail(self):
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ ip_range = '192.168.1.0/28'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body)
+
+ def test_create_bad_cidr_fail(self):
+ # netaddr can't handle /32 or /31 CIDRs
+ ip_range = '192.168.1.1/32'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body)
+
+ def test_create_invalid_cidr_fail(self):
+ ip_range = 'not a cidr'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips-bulk')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index 171b0900e..a72430fd9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
from lxml import etree
import webob
@@ -29,7 +31,7 @@ from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
-from nova import utils
+
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
@@ -88,7 +90,7 @@ def network_api_disassociate(self, context, instance, floating_address):
def fake_instance_get(context, instance_id):
return {
"id": 1,
- "uuid": utils.gen_uuid(),
+ "uuid": uuid.uuid4(),
"name": 'fake',
"user_id": 'fakeuser',
"project_id": '123'}
@@ -227,7 +229,7 @@ class FloatingIpTest(test.TestCase):
def test_floating_ip_show_not_found(self):
def fake_get_floating_ip(*args, **kwargs):
- raise exception.FloatingIpNotFound()
+ raise exception.FloatingIpNotFound(id='fake')
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
@@ -377,7 +379,8 @@ class FloatingIpTest(test.TestCase):
fixed_address=None):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
- raise exception.FloatingIpNotFoundForAddress()
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_network_api_associate)
@@ -393,7 +396,8 @@ class FloatingIpTest(test.TestCase):
floating_address):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
- raise exception.FloatingIpNotFoundForAddress()
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
diff --git a/nova/tests/api/openstack/compute/contrib/test_fping.py b/nova/tests/api/openstack/compute/contrib/test_fping.py
new file mode 100644
index 000000000..9a838162a
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_fping.py
@@ -0,0 +1,94 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack import extensions
+from nova import exception
+from nova import test
+from nova.tests.api.openstack import fakes
+import nova.utils
+
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def execute(*cmd, **args):
+ return "".join(["%s is alive" % ip for ip in cmd[1:]])
+
+
+class FpingTest(test.TestCase):
+
+ def setUp(self):
+ super(FpingTest, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(nova.db, "instance_get_all_by_filters",
+ return_servers)
+ self.stubs.Set(nova.db, "instance_get_by_uuid",
+ return_server)
+ self.stubs.Set(nova.db, "instance_get_all_by_project",
+ return_servers)
+ self.stubs.Set(nova.utils, "execute",
+ execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ lambda self: None)
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = fping.FpingController(self.ext_mgr)
+
+ def test_fping_index(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ self.assertTrue("servers" in res_dict)
+ for srv in res_dict["servers"]:
+ for key in "project_id", "id", "alive":
+ self.assertTrue(key in srv)
+
+ def test_fping_index_policy(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
+ self.assertRaises(exception.NotAuthorized, self.controller.index, req)
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ self.assertTrue("servers" in res_dict)
+
+ def test_fping_index_include(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?include=%s" % ids[0])
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_index_exclude(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?exclude=%s" %
+ ",".join(ids[1:]))
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_show(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping/%s" % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertTrue("server" in res_dict)
+ srv = res_dict["server"]
+ for key in "project_id", "id", "alive":
+ self.assertTrue(key in srv)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
new file mode 100644
index 000000000..804decdff
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
@@ -0,0 +1,151 @@
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+
+from lxml import etree
+import webob
+
+from nova.api.openstack import wsgi
+from nova import compute
+from nova.compute import vm_states
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+SENTINEL = object()
+
+
+def fake_compute_get(*args, **kwargs):
+ def _return_server(*_args, **_kwargs):
+ return fakes.stub_instance(*args, **kwargs)
+ return _return_server
+
+
+class HideServerAddressesTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(HideServerAddressesTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Hide_server_addresses'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ @staticmethod
+ def _get_server(body):
+ return jsonutils.loads(body).get('server')
+
+ @staticmethod
+ def _get_servers(body):
+ return jsonutils.loads(body).get('servers')
+
+ @staticmethod
+ def _get_addresses(server):
+ return server.get('addresses', SENTINEL)
+
+ def _check_addresses(self, addresses, exists):
+ self.assertTrue(addresses is not SENTINEL)
+ if exists:
+ self.assertTrue(addresses)
+ else:
+ self.assertFalse(addresses)
+
+ def test_show_hides_in_building(self):
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.BUILDING))
+ res = self._make_request('/v2/fake/servers/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=False)
+
+ def test_show(self):
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.ACTIVE))
+ res = self._make_request('/v2/fake/servers/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=True)
+
+ def test_detail_hides_building_server_addresses(self):
+ instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0),
+ vm_state=vm_states.ACTIVE)
+ instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1),
+ vm_state=vm_states.BUILDING)
+ instances = [instance_0, instance_1]
+
+ def get_all(*args, **kwargs):
+ return instances
+
+ self.stubs.Set(compute.api.API, 'get_all', get_all)
+ res = self._make_request('/v2/fake/servers/detail')
+
+ self.assertEqual(res.status_int, 200)
+ servers = self._get_servers(res.body)
+
+ self.assertEqual(len(servers), len(instances))
+
+ for instance, server in itertools.izip(instances, servers):
+ addresses = self._get_addresses(server)
+ exists = (instance['vm_state'] == vm_states.ACTIVE)
+ self._check_addresses(addresses, exists=exists)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ res = self._make_request('/v2/fake/servers/' + fakes.get_fake_uuid())
+
+ self.assertEqual(res.status_int, 404)
+
+
+class HideAddressesXmlTest(HideServerAddressesTest):
+ content_type = 'application/xml'
+
+ @staticmethod
+ def _get_server(body):
+ return etree.XML(body)
+
+ @staticmethod
+ def _get_servers(body):
+ return etree.XML(body).getchildren()
+
+ @staticmethod
+ def _get_addresses(server):
+ addresses = server.find('{%s}addresses' % wsgi.XMLNS_V11)
+ if addresses is None:
+ return SENTINEL
+ return addresses
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index b2b5407f5..e103b5b19 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -19,64 +19,75 @@ import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
-from nova import context
+from nova import context as context_maker
from nova import db
-from nova import flags
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import fake_hosts
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
-HOST_LIST = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nonova"},
- {"host_name": "host_v1", "service": "volume", "zone": "nova"},
- {"host_name": "host_v2", "service": "volume", "zone": "nonova"}]
-HOST_LIST_NOVA_ZONE = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_v1", "service": "volume", "zone": "nova"}]
-SERVICES_LIST = [
- {"host": "host_c1", "topic": "compute", "availability_zone": "nova"},
- {"host": "host_c2", "topic": "compute", "availability_zone": "nonova"},
- {"host": "host_v1", "topic": "volume", "availability_zone": "nova"},
- {"host": "host_v2", "topic": "volume", "availability_zone": "nonova"}]
-
-
-def stub_service_get_all(self, req):
- return SERVICES_LIST
-
-
-def stub_set_host_enabled(context, host, enabled):
- # We'll simulate success and failure by assuming
- # that 'host_c1' always succeeds, and 'host_c2'
- # always fails
- fail = (host == "host_c2")
- status = "enabled" if (enabled != fail) else "disabled"
- return status
-def stub_set_host_maintenance(context, host, mode):
+def stub_service_get_all(context, disabled=None):
+ return fake_hosts.SERVICES_LIST
+
+
+def stub_service_get_by_host_and_topic(context, host_name, topic):
+ for service in stub_service_get_all(context):
+ if service['host'] == host_name and service['topic'] == topic:
+ return service
+
+
+def stub_set_host_enabled(context, host_name, enabled):
+ """
+ Simulates three possible behaviours for VM drivers or compute drivers when
+ enabling or disabling a host.
+
+ 'enabled' means new instances can go to this host
+ 'disabled' means they can't
+ """
+ results = {True: "enabled", False: "disabled"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not enabled]
+ else:
+ # Do the right thing
+ return results[enabled]
+
+
+def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
- fail = (host == "host_c2")
- maintenance = "on_maintenance" if (mode != fail) else "off_maintenance"
- return maintenance
-
-
-def stub_host_power_action(context, host, action):
+ results = {True: "on_maintenance", False: "off_maintenance"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not mode]
+ else:
+ # Do the right thing
+ return results[mode]
+
+
+def stub_host_power_action(context, host_name, action):
+ if host_name == "notimplemented":
+ raise NotImplementedError()
return action
def _create_instance(**kwargs):
- """Create a test instance"""
- ctxt = context.get_admin_context()
+ """Create a test instance."""
+ ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
- """Create a dictionary for a test instance"""
+ """Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
@@ -99,12 +110,12 @@ def _create_instance_dict(**kwargs):
class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {}
class FakeRequestWithNovaZone(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
@@ -114,35 +125,41 @@ class HostTestCase(test.TestCase):
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
+ self.hosts_api = self.controller.api
self.req = FakeRequest()
+
+ # Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
- self.stubs.Set(self.controller.api, 'set_host_enabled',
+ # Only hosts in our fake DB exist
+ self.stubs.Set(db, 'service_get_by_host_and_topic',
+ stub_service_get_by_host_and_topic)
+ # 'host_c1' always succeeds, and 'host_c2'
+ self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
- self.stubs.Set(self.controller.api, 'set_host_maintenance',
+ # 'host_c1' always succeeds, and 'host_c2'
+ self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
- self.stubs.Set(self.controller.api, 'host_power_action',
+ self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
- result = self.controller.update(self.req, host, body=body)
+ result = self.controller.update(self.req, host, body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the compute hosts are returned."""
- hosts = os_hosts._list_hosts(self.req)
- self.assertEqual(hosts, HOST_LIST)
-
- compute_hosts = os_hosts._list_hosts(self.req, "compute")
- expected = [host for host in HOST_LIST
- if host["service"] == "compute"]
- self.assertEqual(compute_hosts, expected)
+ result = self.controller.index(self.req)
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_list_hosts_with_zone(self):
- req = FakeRequestWithNovaZone()
- hosts = os_hosts._list_hosts(req)
- self.assertEqual(hosts, HOST_LIST_NOVA_ZONE)
+ result = self.controller.index(FakeRequestWithNovaZone())
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
@@ -160,6 +177,23 @@ class HostTestCase(test.TestCase):
self._test_host_update('host_c1', 'maintenance_mode',
'disable', 'off_maintenance')
+ def _test_host_update_notimpl(self, key, val):
+ def stub_service_get_all_notimpl(self, req):
+ return [{'host': 'notimplemented', 'topic': None,
+ 'availability_zone': None}]
+ self.stubs.Set(db, 'service_get_all',
+ stub_service_get_all_notimpl)
+ body = {key: val}
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ self.controller.update,
+ self.req, 'notimplemented', body=body)
+
+ def test_disable_host_notimpl(self):
+ self._test_host_update_notimpl('status', 'disable')
+
+ def test_enable_maintenance_notimpl(self):
+ self._test_host_update_notimpl('maintenance_mode', 'enable')
+
def test_host_startup(self):
result = self.controller.startup(self.req, "host_c1")
self.assertEqual(result["power_action"], "startup")
@@ -172,35 +206,44 @@ class HostTestCase(test.TestCase):
result = self.controller.reboot(self.req, "host_c1")
self.assertEqual(result["power_action"], "reboot")
+ def _test_host_power_action_notimpl(self, method):
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ method, self.req, "notimplemented")
+
+ def test_host_startup_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.startup)
+
+ def test_host_shutdown_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.shutdown)
+
+ def test_host_reboot_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.reboot)
+
def test_bad_status_value(self):
bad_body = {"status": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", body=bad_body)
+ self.req, "host_c1", bad_body)
bad_body2 = {"status": "disablabc"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", body=bad_body2)
+ self.req, "host_c1", bad_body2)
def test_bad_update_key(self):
bad_body = {"crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", body=bad_body)
+ self.req, "host_c1", bad_body)
def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {"status": "disable", "crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", body=bad_body)
+ self.req, "host_c1", bad_body)
def test_good_udpate_keys(self):
body = {"status": "disable", "maintenance_mode": "enable"}
- result = self.controller.update(self.req, 'host_c1', body=body)
+ result = self.controller.update(self.req, 'host_c1', body)
self.assertEqual(result["host"], "host_c1")
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
- def test_bad_host(self):
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- self.req, "bogus_host_name", body={"status": "disable"})
-
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
@@ -210,7 +253,7 @@ class HostTestCase(test.TestCase):
self.req.environ["nova.context"].is_admin = True
def test_show_host_not_exist(self):
- """A host given as an argument does not exists."""
+ # A host given as an argument does not exists.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
@@ -219,9 +262,9 @@ class HostTestCase(test.TestCase):
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
- ctxt = context.get_admin_context()
+ ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
- 'report_count': 0, 'availability_zone': 'dummyzone'}
+ 'report_count': 0}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
@@ -234,8 +277,8 @@ class HostTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
- """No instance are running on the given host."""
- ctxt = context.get_admin_context()
+ """No instances are running on the given host."""
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
@@ -251,7 +294,7 @@ class HostTestCase(test.TestCase):
def test_show_works_correctly(self):
"""show() works correctly as expected."""
- ctxt = context.get_admin_context()
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
@@ -274,21 +317,21 @@ class HostTestCase(test.TestCase):
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
- self.deserializer = os_hosts.HostDeserializer()
+ self.deserializer = os_hosts.HostUpdateDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
- text = serializer.serialize(HOST_LIST)
+ text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
- self.assertEqual(len(HOST_LIST), len(tree))
- for i in range(len(HOST_LIST)):
+ self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
+ for i in range(len(fake_hosts.HOST_LIST)):
self.assertEqual('host', tree[i].tag)
- self.assertEqual(HOST_LIST[i]['host_name'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
tree[i].get('host_name'))
- self.assertEqual(HOST_LIST[i]['service'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
tree[i].get('service'))
def test_update_serializer_with_status(self):
@@ -338,9 +381,12 @@ class HostSerializerTest(test.TestCase):
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
- exemplar = dict(status='enabled', foo='bar')
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<updates><status>enabled</status><foo>bar</foo></updates>')
+ exemplar = dict(status='enabled', maintenance_mode='disable')
+ intext = """<?xml version='1.0' encoding='UTF-8'?>
+ <updates>
+ <status>enabled</status>
+ <maintenance_mode>disable</maintenance_mode>
+ </updates>"""
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
index 740477ca3..4e4d214cc 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
@@ -91,7 +91,7 @@ def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS:
if hyper['id'] == compute_id:
return hyper
- raise exception.ComputeHostNotFound
+ raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_statistics(context):
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index 808493f1b..44d9e8af3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -15,23 +15,20 @@
# under the License.
import copy
-import itertools
import math
import netaddr
import uuid
import webob
-from nova.api.openstack.compute.contrib import networks
+from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
+from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
-from nova import flags
-
-
-FLAGS = flags.FLAGS
-
+CONF = cfg.CONF
FAKE_NETWORKS = [
{
@@ -96,6 +93,8 @@ NEW_NETWORK = {
class FakeNetworkAPI(object):
+ _sentinel = object()
+
def __init__(self):
self.networks = copy.deepcopy(FAKE_NETWORKS)
@@ -104,14 +103,25 @@ class FakeNetworkAPI(object):
if network['id'] == network_id:
del self.networks[0]
return True
- raise exception.NetworkNotFoundForUUID()
+ raise exception.NetworkNotFoundForUUID(uuid=network_id)
def disassociate(self, context, network_uuid):
for network in self.networks:
if network.get('uuid') == network_uuid:
network['project_id'] = None
return True
- raise exception.NetworkNotFound()
+ raise exception.NetworkNotFound(network_id=network_uuid)
+
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ if host is not FakeNetworkAPI._sentinel:
+ network['host'] = host
+ if project is not FakeNetworkAPI._sentinel:
+ network['project_id'] = project
+ return True
+ raise exception.NetworkNotFound(network_id=network_uuid)
def add_network_to_project(self, context,
project_id, network_uuid=None):
@@ -133,16 +143,16 @@ class FakeNetworkAPI(object):
for network in self.networks:
if network.get('uuid') == network_id:
return network
- raise exception.NetworkNotFound()
+ raise exception.NetworkNotFound(network_id=network_id)
def create(self, context, **kwargs):
subnet_bits = int(math.ceil(math.log(kwargs.get(
- 'network_size', FLAGS.network_size), 2)))
+ 'network_size', CONF.network_size), 2)))
fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(
prefixlen_v4,
- count=kwargs.get('num_networks', FLAGS.num_networks)))
+ count=kwargs.get('num_networks', CONF.num_networks)))
new_networks = []
new_id = max((net['id'] for net in self.networks))
for index, subnet_v4 in enumerate(subnets_v4):
@@ -167,7 +177,10 @@ class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
- self.controller = networks.NetworkController(self.fake_network_api)
+ self.controller = networks.NetworkController(
+ self.fake_network_api)
+ self.associate_controller = networks_associate\
+ .NetworkAssociateActionController(self.fake_network_api)
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -197,13 +210,35 @@ class NetworksTest(test.TestCase):
def test_network_disassociate(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- res = self.controller.action(req, uuid, {'disassociate': None})
+ res = self.controller._disassociate_host_and_project(
+ req, uuid, {'disassociate': None})
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
+ self.assertEqual(self.fake_network_api.networks[0]['host'], None)
+
+ def test_network_disassociate_host_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_host_only(
+ req, uuid, {'disassociate_host': None})
self.assertEqual(res.status_int, 202)
+ self.assertNotEqual(self.fake_network_api.networks[0]['project_id'],
+ None)
+ self.assertEqual(self.fake_network_api.networks[0]['host'], None)
+
+ def test_network_disassociate_project_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_project_only(
+ req, uuid, {'disassociate_project': None})
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
+ self.assertNotEqual(self.fake_network_api.networks[0]['host'], None)
def test_network_disassociate_not_found(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100/action')
self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.action,
+ self.controller._disassociate_host_and_project,
req, 100, {'disassociate': None})
def test_network_get_as_user(self):
@@ -249,6 +284,17 @@ class NetworksTest(test.TestCase):
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['network']['project_id'], 'fake')
+ def test_network_associate_with_host(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._associate_host(
+ req, uuid, {'associate_host': "TestHost"})
+ self.assertEqual(res.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['host'], 'TestHost')
+
def test_network_create(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
res_dict = self.controller.create(req, NEW_NETWORK)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
index b732f889c..a72f5bf0f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
@@ -24,11 +24,11 @@ from nova.tests.api.openstack import fakes
def quota_set(class_name):
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
- 'volumes': 10, 'gigabytes': 1000, 'ram': 51200,
- 'floating_ips': 10, 'instances': 10, 'injected_files': 5,
- 'cores': 20, 'injected_file_content_bytes': 10240,
- 'security_groups': 10, 'security_group_rules': 20,
- 'key_pairs': 100, 'injected_file_path_bytes': 255}}
+ 'ram': 51200, 'floating_ips': 10, 'instances': 10,
+ 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240, 'security_groups': 10,
+ 'security_group_rules': 20, 'key_pairs': 100,
+ 'injected_file_path_bytes': 255}}
class QuotaClassSetsTest(test.TestCase):
@@ -42,10 +42,8 @@ class QuotaClassSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
- 'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
@@ -62,8 +60,6 @@ class QuotaClassSetsTest(test.TestCase):
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['volumes'], 10)
- self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
@@ -89,15 +85,13 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }}
+ 'key_pairs': 100}}
req = fakes.HTTPRequest.blank(
'/v2/fake4/os-quota-class-sets/test_class',
@@ -108,8 +102,7 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
@@ -135,8 +128,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
- volumes=30,
- gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
@@ -162,8 +153,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
exemplar = dict(quota_class_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
- volumes='30',
- gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
@@ -177,8 +166,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
- '<volumes>30</volumes>'
- '<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index f628535a7..dab8c136e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -25,8 +25,8 @@ from nova.tests.api.openstack import fakes
def quota_set(id):
- return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
- 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
+ return {'quota_set': {'id': id, 'metadata_items': 128,
+ 'ram': 51200, 'floating_ips': 10,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
@@ -44,17 +44,14 @@ class QuotaSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
- 'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }
+ 'key_pairs': 100}
quota_set = self.controller._format_quota_set('1234', raw_quota_set)
qs = quota_set['quota_set']
@@ -63,8 +60,6 @@ class QuotaSetsTest(test.TestCase):
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['volumes'], 10)
- self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
@@ -85,8 +80,6 @@ class QuotaSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
- 'gigabytes': 1000,
'floating_ips': 10,
'metadata_items': 128,
'injected_files': 5,
@@ -94,8 +87,7 @@ class QuotaSetsTest(test.TestCase):
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }}
+ 'key_pairs': 100}}
self.assertEqual(res_dict, expected)
@@ -113,8 +105,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
@@ -130,8 +121,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
@@ -144,8 +134,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
- 'ram': -2, 'volumes': -2,
- 'gigabytes': -2, 'floating_ips': -2,
+ 'ram': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
@@ -167,8 +156,6 @@ class QuotaXMLSerializerTest(test.TestCase):
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
- volumes=30,
- gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
@@ -193,8 +180,6 @@ class QuotaXMLSerializerTest(test.TestCase):
exemplar = dict(quota_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
- volumes='30',
- gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
@@ -208,8 +193,6 @@ class QuotaXMLSerializerTest(test.TestCase):
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
- '<volumes>30</volumes>'
- '<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py
index 88e9141f7..2719c1339 100644
--- a/nova/tests/api/openstack/compute/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/compute/contrib/test_rescue.py
@@ -16,12 +16,13 @@ import webob
from nova import compute
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
def rescue(self, context, instance, rescue_password=None):
@@ -71,7 +72,7 @@ class RescueTest(test.TestCase):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
- self.assertEqual(FLAGS.password_length, len(resp_json['adminPass']))
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
def test_rescue_of_rescued_instance(self):
body = dict(rescue=None)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index d33da9ca5..ccb58f858 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
from lxml import etree
import mox
import webob
@@ -27,17 +25,15 @@ from nova.api.openstack import xmlutil
from nova import compute
import nova.db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
-
+CONF = cfg.CONF
FAKE_UUID = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
-FLAGS = flags.FLAGS
-
class AttrDict(dict):
def __getattr__(self, k):
@@ -259,7 +255,7 @@ class TestSecurityGroups(test.TestCase):
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- for num in range(1, FLAGS.quota_security_groups + 1):
+ for num in range(1, CONF.quota_security_groups + 1):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
@@ -1000,7 +996,7 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- for num in range(100, 100 + FLAGS.quota_security_group_rules):
+ for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': '2', 'group_id': '1'
@@ -1016,9 +1012,10 @@ class TestSecurityGroupRules(test.TestCase):
req, {'security_group_rule': rule})
-class TestSecurityGroupRulesXMLDeserializer(unittest.TestCase):
+class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
def setUp(self):
+ super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
self.deserializer = security_groups.SecurityGroupRulesXMLDeserializer()
def test_create_request(self):
@@ -1066,9 +1063,10 @@ class TestSecurityGroupRulesXMLDeserializer(unittest.TestCase):
self.assertEquals(request['body'], expected)
-class TestSecurityGroupXMLDeserializer(unittest.TestCase):
+class TestSecurityGroupXMLDeserializer(test.TestCase):
def setUp(self):
+ super(TestSecurityGroupXMLDeserializer, self).setUp()
self.deserializer = security_groups.SecurityGroupXMLDeserializer()
def test_create_request(self):
@@ -1111,8 +1109,9 @@ class TestSecurityGroupXMLDeserializer(unittest.TestCase):
self.assertEquals(request['body'], expected)
-class TestSecurityGroupXMLSerializer(unittest.TestCase):
+class TestSecurityGroupXMLSerializer(test.TestCase):
def setUp(self):
+ super(TestSecurityGroupXMLSerializer, self).setUp()
self.namespace = wsgi.XMLNS_V11
self.rule_serializer = security_groups.SecurityGroupRuleTemplate()
self.index_serializer = security_groups.SecurityGroupsTemplate()
@@ -1371,7 +1370,7 @@ class SecurityGroupsOutputTest(test.TestCase):
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
index ac49fb2db..ea4565e14 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
from lxml import etree
from nova.api.openstack import compute
@@ -60,7 +58,7 @@ class ServerDiagnosticsTest(test.TestCase):
self.assertEqual(output, {'data': 'Some diagnostic info'})
-class TestServerDiagnosticsXMLSerializer(unittest.TestCase):
+class TestServerDiagnosticsXMLSerializer(test.TestCase):
namespace = wsgi.XMLNS_V11
def _tag(self, elem):
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_password.py b/nova/tests/api/openstack/compute/contrib/test_server_password.py
new file mode 100644
index 000000000..600c4eda4
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_server_password.py
@@ -0,0 +1,86 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', lambda *a, **kw: {'uuid': ''})
+ self.password = 'fakepass'
+
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_set_password(context, instance_uuid, password):
+ self.password = password
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'set_password', fake_set_password)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_password'])
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
+
+
+class ServerPasswordXmlTest(ServerPasswordTest):
+ content_type = 'application/xml'
+
+ def _get_pass(self, body):
+ # NOTE(vish): first element is password
+ return etree.XML(body).text or ''
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index e0d3cbb0f..554379e74 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import mox
import webob
@@ -64,7 +62,3 @@ class ServerStartStopTest(test.TestCase):
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
new file mode 100644
index 000000000..1bd47b67a
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -0,0 +1,198 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from datetime import datetime
+from nova.api.openstack.compute.contrib import services
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+fake_services_list = [{'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'disabled': True,
+ 'topic': 'scheduler',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'disabled': True,
+ 'topic': 'compute',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'disabled': False,
+ 'topic': 'scheduler',
+ 'updated_at': datetime(2012, 9, 19, 6, 55, 34),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'disabled': True,
+ 'topic': 'compute',
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
+ ]
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithSevice(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"service": "nova-compute"}
+
+
+class FakeRequestWithHost(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1"}
+
+
+class FakeRequestWithHostService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1", "service": "nova-compute"}
+
+
+def fake_service_get_all(context):
+ return fake_services_list
+
+
+def fake_service_get_by_host_binary(context, host, binary):
+ for service in fake_services_list:
+ if service['host'] == host and service['binary'] == binary:
+ return service
+ return None
+
+
+def fake_service_get_by_id(value):
+ for service in fake_services_list:
+ if service['id'] == value:
+ return service
+ return None
+
+
+def fake_service_update(context, service_id, values):
+ service = fake_service_get_by_id(service_id)
+ if service is None:
+ raise exception.ServiceNotFound(service_id=service_id)
+ else:
+ {'host': 'host1', 'service': 'nova-compute',
+ 'disabled': values['disabled']}
+
+
+def fake_utcnow():
+ return datetime(2012, 10, 29, 13, 42, 11)
+
+
+class ServicesTest(test.TestCase):
+
+ def setUp(self):
+ super(ServicesTest, self).setUp()
+
+ self.stubs.Set(db, "service_get_all", fake_service_get_all)
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(db, "service_get_by_args",
+ fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update", fake_service_update)
+
+ self.context = context.get_admin_context()
+ self.controller = services.ServiceController()
+
+ def tearDown(self):
+ super(ServicesTest, self).tearDown()
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-scheduler',
+ 'host': 'host1', 'zone': 'internal',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1', 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler', 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute', 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_service(self):
+ req = FakeRequestWithSevice()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-compute', 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ body = {'host': 'host1', 'service': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ res_dict = self.controller.update(req, "enable", body)
+
+ self.assertEqual(res_dict['disabled'], False)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ body = {'host': 'host1', 'service': 'nova-compute'}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['disabled'], True)
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index 0a4610aea..b49a1feb4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -23,7 +23,6 @@ import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import api
from nova import context
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
@@ -31,9 +30,6 @@ from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
SERVERS = 5
TENANTS = 2
HOURS = 24
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
index 6e76fc04a..a223178fb 100644
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
@@ -19,7 +19,6 @@ import webob
from nova.api.openstack.compute.contrib import volumes
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -27,8 +26,6 @@ from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
_last_param = {}
diff --git a/nova/tests/api/openstack/compute/contrib/test_used_limits.py b/nova/tests/api/openstack/compute/contrib/test_used_limits.py
index 3ab814ebb..ce2322bfe 100644
--- a/nova/tests/api/openstack/compute/contrib/test_used_limits.py
+++ b/nova/tests/api/openstack/compute/contrib/test_used_limits.py
@@ -24,8 +24,10 @@ from nova import test
class FakeRequest(object):
- def __init__(self, context):
+ def __init__(self, context, reserved=False):
self.environ = {'nova.context': context}
+ self.reserved = reserved
+ self.GET = {'reserved': 1} if reserved else {}
class UsedLimitsTestCase(test.TestCase):
@@ -36,9 +38,9 @@ class UsedLimitsTestCase(test.TestCase):
self.controller = used_limits.UsedLimitsController()
self.fake_context = nova.context.RequestContext('fake', 'fake')
- self.fake_req = FakeRequest(self.fake_context)
- def test_used_limits(self):
+ def _do_test_used_limits(self, reserved):
+ fake_req = FakeRequest(self.fake_context, reserved=reserved)
obj = {
"limits": {
"rate": [],
@@ -50,26 +52,35 @@ class UsedLimitsTestCase(test.TestCase):
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
- 'totalVolumesUsed': 'volumes',
- 'totalVolumeGigabytesUsed': 'gigabytes',
- 'totalSecurityGroupsUsed': 'floating_ips',
- 'totalKeyPairsUsed': 'key_pairs',
+ 'totalFloatingIpsUsed': 'floating_ips',
+ 'totalSecurityGroupsUsed': 'security_groups',
}
limits = {}
for display_name, q in quota_map.iteritems():
- limits[q] = {'limit': 10, 'in_use': 2}
+ limits[q] = {'limit': len(display_name),
+ 'in_use': len(display_name) / 2,
+ 'reserved': len(display_name) / 3}
def stub_get_project_quotas(context, project_id, usages=True):
return limits
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
- self.controller.index(self.fake_req, res)
+ self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
for used_limit, value in abs_limits.iteritems():
- self.assertEqual(value, limits[quota_map[used_limit]]['in_use'])
+ r = limits[quota_map[used_limit]]['reserved'] if reserved else 0
+ self.assertEqual(value,
+ limits[quota_map[used_limit]]['in_use'] + r)
+
+ def test_used_limits_basic(self):
+ self._do_test_used_limits(False)
+
+ def test_used_limits_with_reserved(self):
+ self._do_test_used_limits(True)
def test_used_ram_added(self):
+ fake_req = FakeRequest(self.fake_context)
obj = {
"limits": {
"rate": [],
@@ -84,12 +95,13 @@ class UsedLimitsTestCase(test.TestCase):
return {'ram': {'limit': 512, 'in_use': 256}}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
- self.controller.index(self.fake_req, res)
+ self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
self.assertTrue('totalRAMUsed' in abs_limits)
self.assertEqual(abs_limits['totalRAMUsed'], 256)
def test_no_ram_quota(self):
+ fake_req = FakeRequest(self.fake_context)
obj = {
"limits": {
"rate": [],
@@ -102,11 +114,12 @@ class UsedLimitsTestCase(test.TestCase):
return {}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
- self.controller.index(self.fake_req, res)
+ self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
self.assertFalse('totalRAMUsed' in abs_limits)
def test_used_limits_xmlns(self):
+ fake_req = FakeRequest(self.fake_context)
obj = {
"limits": {
"rate": [],
@@ -120,6 +133,6 @@ class UsedLimitsTestCase(test.TestCase):
return {}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
- self.controller.index(self.fake_req, res)
+ self.controller.index(fake_req, res)
response = res.serialize(None, 'xml')
self.assertTrue(used_limits.XMLNS in response.body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 6c092cbd4..3119f55e8 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -22,8 +22,7 @@ from nova.api.openstack.compute.contrib import volumes
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova import context
-from nova import db
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
@@ -31,8 +30,8 @@ from nova.tests.api.openstack import fakes
from nova.volume import cinder
from webob import exc
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
@@ -129,23 +128,18 @@ class BootFromVolumeTest(test.TestCase):
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(FLAGS.password_length, len(server['adminPass']))
+ self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(_block_device_mapping_seen), 1)
self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
self.assertEqual(_block_device_mapping_seen[0]['device_name'],
'/dev/vda')
-def return_volume(context, volume_id):
- return {'id': volume_id}
-
-
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(db, 'volume_get', return_volume)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
@@ -295,6 +289,27 @@ class VolumeAttachTests(test.TestCase):
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
+ def test_attach_volume_bad_id(self):
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume)
+ attachments = volumes.VolumeAttachmentController()
+
+ body = {
+ 'volumeAttachment': {
+ 'device': None,
+ 'volumeId': 'TESTVOLUME',
+ }
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-volumes/attach')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = jsonutils.dumps(body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest, attachments.create,
+ req, FAKE_UUID, body)
+
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
diff --git a/nova/tests/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/api/openstack/compute/extensions/foxinsocks.py
index cf901472c..fdc612047 100644
--- a/nova/tests/api/openstack/compute/extensions/foxinsocks.py
+++ b/nova/tests/api/openstack/compute/extensions/foxinsocks.py
@@ -61,7 +61,7 @@ class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
class Foxinsocks(extensions.ExtensionDescriptor):
- """The Fox In Socks Extension"""
+ """The Fox In Socks Extension."""
name = "Fox In Socks"
alias = "FOXNSOX"
diff --git a/nova/tests/api/openstack/compute/test_consoles.py b/nova/tests/api/openstack/compute/test_consoles.py
index 6ea8149cf..8f0a02e30 100644
--- a/nova/tests/api/openstack/compute/test_consoles.py
+++ b/nova/tests/api/openstack/compute/test_consoles.py
@@ -17,6 +17,7 @@
# under the License.
import datetime
+import uuid as stdlib_uuid
from lxml import etree
import webob
@@ -26,14 +27,12 @@ from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
@@ -58,7 +57,7 @@ class FakeInstanceDB(object):
if id is None:
id = self.max_id + 1
if uuid is None:
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
@@ -134,7 +133,7 @@ class ConsolesControllerTest(test.TestCase):
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
- self.uuid = str(utils.gen_uuid())
+ self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
@@ -167,7 +166,7 @@ class ConsolesControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
@@ -211,7 +210,7 @@ class ConsolesControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index ceb90d24c..485968209 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -25,13 +25,13 @@ from nova.api.openstack.compute import extensions as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import matchers
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -98,7 +98,7 @@ class StubLateExtensionController(wsgi.Controller):
class StubExtensionManager(object):
- """Provides access to Tweedle Beetles"""
+ """Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
@@ -141,7 +141,7 @@ class StubExtensionManager(object):
class ExtensionTestCase(test.TestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
- ext_list = FLAGS.osapi_compute_extension[:]
+ ext_list = CONF.osapi_compute_extension[:]
fox = ('nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
if fox not in ext_list:
@@ -157,8 +157,10 @@ class ExtensionControllerTest(ExtensionTestCase):
"AdminActions",
"Aggregates",
"AvailabilityZone",
+ "Agents",
"Certificates",
"Cloudpipe",
+ "CloudpipeUpdate",
"ConsoleOutput",
"Consoles",
"Createserverext",
@@ -166,6 +168,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"DiskConfig",
"ExtendedStatus",
"ExtendedServerAttributes",
+ "FixedIPs",
"FlavorAccess",
"FlavorDisabled",
"FlavorExtraSpecs",
@@ -176,19 +179,21 @@ class ExtensionControllerTest(ExtensionTestCase):
"FloatingIps",
"FloatingIpDns",
"FloatingIpPools",
+ "FloatingIpsBulk",
"Fox In Socks",
"Hosts",
"Keypairs",
"Multinic",
"MultipleCreate",
- "Networks",
"QuotaClasses",
"Quotas",
"Rescue",
"SchedulerHints",
"SecurityGroups",
"ServerDiagnostics",
+ "ServerPassword",
"ServerStartStop",
+ "Services",
"SimpleTenantUsage",
"UsedLimits",
"UserData",
@@ -221,7 +226,7 @@ class ExtensionControllerTest(ExtensionTestCase):
'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'The Fox In Socks Extension',
+ 'description': 'The Fox In Socks Extension.',
'alias': 'FOXNSOX',
'links': []
},
@@ -245,7 +250,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
- "description": "The Fox In Socks Extension",
+ "description": "The Fox In Socks Extension.",
"alias": "FOXNSOX",
"links": []})
@@ -276,7 +281,7 @@ class ExtensionControllerTest(ExtensionTestCase):
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension')
+ 'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extensions')
@@ -296,7 +301,7 @@ class ExtensionControllerTest(ExtensionTestCase):
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(root.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension')
+ 'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extension')
@@ -347,7 +352,7 @@ class ResourceExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_resource(self):
res_ext = base_extensions.ResourceExtension('tweedles',
@@ -365,7 +370,7 @@ class ResourceExtensionTest(ExtensionTestCase):
"code": 404
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
class InvalidExtension(object):
@@ -430,7 +435,7 @@ class ActionExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_action(self):
body = dict(blah=dict(name="test"))
@@ -451,7 +456,7 @@ class ActionExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
class RequestExtensionTest(ExtensionTestCase):
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index 77d40df03..947a2e294 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -26,13 +26,9 @@ import nova.compute.instance_types
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
-
-
-FLAGS = flags.FLAGS
-
+from nova.tests import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -81,7 +77,7 @@ def empty_instance_type_get_all(inactive=False, filters=None):
def return_instance_type_not_found(flavor_id):
- raise exception.InstanceTypeNotFound(flavor_id=flavor_id)
+ raise exception.InstanceTypeNotFound(instance_type_id=flavor_id)
class FlavorsTest(test.TestCase):
@@ -219,7 +215,7 @@ class FlavorsTest(test.TestCase):
'rel': 'next'}
]
}
- self.assertDictMatch(flavor, expected)
+ self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_detail_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors/detail?limit=1')
@@ -251,7 +247,8 @@ class FlavorsTest(test.TestCase):
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['1'], 'marker': ['1']}, params)
+ self.assertThat({'limit': ['1'], 'marker': ['1']},
+ matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
@@ -297,7 +294,8 @@ class FlavorsTest(test.TestCase):
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['2'], 'marker': ['2']}, params)
+ self.assertThat({'limit': ['2'], 'marker': ['2']},
+ matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors/detail')
@@ -352,7 +350,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
- """Flavor lists may be filtered by minRam."""
+ # Flavor lists may be filtered by minRam.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
@@ -376,13 +374,13 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
- """Ensure you cannot list flavors with invalid minRam param."""
+ # Ensure you cannot list flavors with invalid minRam param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
- """Flavor lists may be filtered by minDisk."""
+ # Flavor lists may be filtered by minDisk.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
@@ -406,7 +404,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
- """Ensure you cannot list flavors with invalid minDisk param."""
+ # Ensure you cannot list flavors with invalid minDisk param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py
index 1cd46902b..9a8b75c9e 100644
--- a/nova/tests/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/api/openstack/compute/test_image_metadata.py
@@ -18,13 +18,12 @@
import webob
from nova.api.openstack.compute import image_metadata
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class ImageMetaDataTest(test.TestCase):
@@ -134,7 +133,7 @@ class ImageMetaDataTest(test.TestCase):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
overload = {}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
overload['key%s' % num] = 'value%s' % num
body = {'meta': overload}
req.body = jsonutils.dumps(body)
@@ -176,7 +175,7 @@ class ImageMetaDataTest(test.TestCase):
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py
index af1dee30b..09e727da3 100644
--- a/nova/tests/api/openstack/compute/test_images.py
+++ b/nova/tests/api/openstack/compute/test_images.py
@@ -29,14 +29,10 @@ from nova.api.openstack.compute import images
from nova.api.openstack.compute.views import images as images_view
from nova.api.openstack import xmlutil
from nova import exception
-from nova import flags
+from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
-
-
-FLAGS = flags.FLAGS
-
+from nova.tests import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -66,7 +62,7 @@ class ImagesControllerTest(test.TestCase):
href = "http://localhost/v2/fake/images/124"
bookmark = "http://localhost/fake/images/124"
- alternate = "%s/fake/images/124" % utils.generate_glance_url()
+ alternate = "%s/fake/images/124" % glance.generate_glance_url()
server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
server_href = "http://localhost/v2/fake/servers/" + server_uuid
server_bookmark = "http://localhost/fake/servers/" + server_uuid
@@ -112,7 +108,7 @@ class ImagesControllerTest(test.TestCase):
},
}
- self.assertDictMatch(expected_image, actual_image)
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
def test_get_image_with_custom_prefix(self):
self.flags(osapi_compute_link_prefix='https://zoo.com:42',
@@ -166,7 +162,7 @@ class ImagesControllerTest(test.TestCase):
}],
},
}
- self.assertDictMatch(expected_image, actual_image)
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
def test_get_image_404(self):
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown')
@@ -204,7 +200,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 123),
+ "href": alternate % (glance.generate_glance_url(), 123),
}],
},
{
@@ -242,7 +238,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 124),
+ "href": alternate % (glance.generate_glance_url(), 124),
}],
},
{
@@ -280,7 +276,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/125" % utils.generate_glance_url()
+ "href": "%s/fake/images/125" % glance.generate_glance_url()
}],
},
{
@@ -318,7 +314,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/126" % utils.generate_glance_url()
+ "href": "%s/fake/images/126" % glance.generate_glance_url()
}],
},
{
@@ -356,7 +352,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/127" % utils.generate_glance_url()
+ "href": "%s/fake/images/127" % glance.generate_glance_url()
}],
},
{
@@ -394,7 +390,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/128" % utils.generate_glance_url()
+ "href": "%s/fake/images/128" % glance.generate_glance_url()
}],
},
{
@@ -432,7 +428,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/129" % utils.generate_glance_url()
+ "href": "%s/fake/images/129" % glance.generate_glance_url()
}],
},
{
@@ -456,12 +452,12 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": "%s/fake/images/130" % utils.generate_glance_url()
+ "href": "%s/fake/images/130" % glance.generate_glance_url()
}],
},
]
- self.assertDictListMatch(expected, response_list)
+ self.assertThat(expected, matchers.DictListMatches(response_list))
def test_get_image_details_with_limit(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
@@ -495,7 +491,7 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 123),
+ "href": alternate % (glance.generate_glance_url(), 123),
}],
},
{
@@ -533,17 +529,18 @@ class ImagesControllerTest(test.TestCase):
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
- "href": alternate % (utils.generate_glance_url(), 124),
+ "href": alternate % (glance.generate_glance_url(), 124),
}],
}]
- self.assertDictListMatch(expected, response_list)
+ self.assertThat(expected, matchers.DictListMatches(response_list))
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['2'], 'marker': ['124']}, params)
+ self.assertThat({'limit': ['2'], 'marker': ['124']},
+ matchers.DictMatches(params))
def test_image_detail_filter_with_name(self):
image_service = self.mox.CreateMockAnything()
@@ -659,7 +656,7 @@ class ImagesControllerTest(test.TestCase):
view = images_view.ViewBuilder()
request = fakes.HTTPRequest.blank('/v2/fake/images/1')
generated_url = view._get_alternate_link(request, 1)
- actual_url = "%s/fake/images/1" % utils.generate_glance_url()
+ actual_url = "%s/fake/images/1" % glance.generate_glance_url()
self.assertEqual(generated_url, actual_url)
def test_delete_image(self):
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index 84c000035..f0f2f02d5 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -30,6 +30,7 @@ from nova.api.openstack import xmlutil
import nova.context
from nova.openstack.common import jsonutils
from nova import test
+from nova.tests import matchers
TEST_LIMITS = [
@@ -100,7 +101,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_empty_index_json(self):
- """Test getting empty limit details in JSON."""
+ # Test getting empty limit details in JSON.
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
@@ -113,15 +114,13 @@ class LimitsControllerTest(BaseLimitTestSuite):
self.assertEqual(expected, body)
def test_index_json(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
'ram': 512,
'instances': 5,
'cores': 21,
- 'gigabytes': 512,
- 'volumes': 5,
'key_pairs': 10,
'floating_ips': 10,
'security_groups': 10,
@@ -170,8 +169,6 @@ class LimitsControllerTest(BaseLimitTestSuite):
"maxTotalRAMSize": 512,
"maxTotalInstances": 5,
"maxTotalCores": 21,
- "maxTotalVolumeGigabytes": 512,
- "maxTotalVolumes": 5,
"maxTotalKeypairs": 10,
"maxTotalFloatingIps": 10,
"maxSecurityGroups": 10,
@@ -192,7 +189,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_index_diff_regex(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
@@ -311,17 +308,17 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.__class__.__module__)
def test_limit_class(self):
- """Test that middleware selected correct limiter class."""
+ # Test that middleware selected correct limiter class.
assert isinstance(self.app._limiter, TestLimiter)
def test_good_request(self):
- """Test successful GET request through middleware."""
+ # Test successful GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
- """Test a rate-limited (413) GET request through middleware."""
+ # Test a rate-limited (413) GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -344,7 +341,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.assertEqual(retryAfter, "60")
def test_limited_request_xml(self):
- """Test a rate-limited (413) response as XML"""
+ # Test a rate-limited (413) response as XML.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -374,7 +371,7 @@ class LimitTest(BaseLimitTestSuite):
"""
def test_GET_no_delay(self):
- """Test a limit handles 1 GET per second."""
+ # Test a limit handles 1 GET per second.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -382,7 +379,7 @@ class LimitTest(BaseLimitTestSuite):
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
- """Test two calls to 1 GET per second limit."""
+ # Test two calls to 1 GET per second limit.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -407,32 +404,32 @@ class ParseLimitsTest(BaseLimitTestSuite):
"""
def test_invalid(self):
- """Test that parse_limits() handles invalid input correctly."""
+ # Test that parse_limits() handles invalid input correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
- """Test that parse_limits() handles bad rules correctly."""
+ # Test that parse_limits() handles bad rules correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
- """Test that parse_limits() handles missing args correctly."""
+ # Test that parse_limits() handles missing args correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
- """Test that parse_limits() handles bad values correctly."""
+ # Test that parse_limits() handles bad values correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
- """Test that parse_limits() handles bad units correctly."""
+ # Test that parse_limits() handles bad units correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
- """Test that parse_limits() handles multiple rules correctly."""
+ # Test that parse_limits() handles multiple rules correctly.
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
@@ -496,9 +493,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(delay, (None, None))
def test_no_delay_PUT(self):
- """
- Simple test to ensure no delay on a single call for a known limit.
- """
+ # Simple test to ensure no delay on a single call for a known limit.
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual(delay, (None, None))
@@ -526,9 +521,7 @@ class LimiterTest(BaseLimitTestSuite):
self.failUnlessAlmostEqual(expected, results, 8)
def test_delay_GET(self):
- """
- Ensure the 11th GET will result in NO delay.
- """
+ # Ensure the 11th GET will result in NO delay.
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
@@ -567,9 +560,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_multiple_delays(self):
- """
- Ensure multiple requests still get a delay.
- """
+ # Ensure multiple requests still get a delay.
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
@@ -581,15 +572,11 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_user_limit(self):
- """
- Test user-specific limits.
- """
+ # Test user-specific limits.
self.assertEqual(self.limiter.levels['user3'], [])
def test_multiple_users(self):
- """
- Tests involving multiple users.
- """
+ # Tests involving multiple users.
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
@@ -655,7 +642,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
self.assertEqual(response.status_int, 204)
def test_invalid_methods(self):
- """Only POSTs should work."""
+ # Only POSTs should work.
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
@@ -797,12 +784,12 @@ class WsgiLimiterProxyTest(BaseLimitTestSuite):
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
def test_200(self):
- """Successful request test."""
+ # Successful request test.
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual(delay, (None, None))
def test_403(self):
- """Forbidden request test."""
+ # Forbidden request test.
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual(delay, (None, None))
@@ -866,7 +853,7 @@ class LimitsViewBuilderTest(test.TestCase):
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
- self.assertDictMatch(output, expected_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
def test_build_limits_empty_limits(self):
expected_limits = {"limits": {"rate": [],
@@ -875,7 +862,7 @@ class LimitsViewBuilderTest(test.TestCase):
abs_limits = {}
rate_limits = []
output = self.view_builder.build(rate_limits, abs_limits)
- self.assertDictMatch(output, expected_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
class LimitsXMLSerializationTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 0afa00f2b..d4c93ef39 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+import uuid
import mox
import webob
@@ -24,21 +25,22 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
-from nova import flags
from nova.image import glance
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
-def return_server_not_found(context, uuid):
+def return_server_not_found(*arg, **kwarg):
raise exception.NotFound()
@@ -176,13 +178,15 @@ class ServerActionsControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
- req, str(utils.gen_uuid()), body)
+ req, str(uuid.uuid4()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
@@ -191,6 +195,18 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_reboot,
req, FAKE_UUID, body)
+ def test_reboot_raises_unprocessable_entity(self):
+ body = dict(reboot=dict(type="HARD"))
+
+ def fake_reboot(*args, **kwargs):
+ raise NotImplementedError()
+
+ self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
@@ -209,7 +225,7 @@ class ServerActionsControllerTest(test.TestCase):
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
- FLAGS.password_length)
+ CONF.password_length)
self.assertEqual(robj['location'], self_href)
@@ -292,7 +308,9 @@ class ServerActionsControllerTest(test.TestCase):
}
def fake_rebuild(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
@@ -333,6 +351,21 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_rebuild,
req, FAKE_UUID, body)
+ def test_rebuild_with_too_large_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": {
+ 256 * "k": "value"
+ }
+ }
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller._action_rebuild, req,
+ FAKE_UUID, body)
+
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
@@ -571,11 +604,36 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_resize,
req, FAKE_UUID, body)
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exception.TooManyInstances,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'resize', fake_resize)
@@ -619,7 +677,9 @@ class ServerActionsControllerTest(test.TestCase):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'confirm_resize',
fake_confirm_resize)
@@ -645,6 +705,14 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_revert_resize,
req, FAKE_UUID, body)
+ def test_revert_resize_server_not_found(self):
+ body = dict(revertResize=None)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob. exc.HTTPNotFound,
+ self.controller._action_revert_resize,
+ req, "bad_server_id", body)
+
def test_revert_resize_server(self):
body = dict(revertResize=None)
@@ -664,7 +732,9 @@ class ServerActionsControllerTest(test.TestCase):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'revert_resize',
fake_revert_resize)
@@ -726,7 +796,7 @@ class ServerActionsControllerTest(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -825,7 +895,7 @@ class ServerActionsControllerTest(test.TestCase):
'metadata': {},
},
}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
@@ -867,7 +937,9 @@ class ServerActionsControllerTest(test.TestCase):
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
- raise exception.InstanceInvalidState
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
self.stubs.Set(compute_api.API, 'snapshot', snapshot)
body = {
@@ -1052,7 +1124,7 @@ class TestServerActionXMLDeserializer(test.TestCase):
],
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
@@ -1065,7 +1137,7 @@ class TestServerActionXMLDeserializer(test.TestCase):
"imageRef": "http://localhost/images/1",
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index bcce624d7..1e992c2a3 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -15,20 +15,21 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
import webob
from nova.api.openstack.compute import server_metadata
from nova.compute import rpcapi as compute_rpcapi
import nova.db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
@@ -65,7 +66,7 @@ def stub_server_metadata():
def stub_max_server_metadata():
metadata = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items):
+ for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
@@ -85,7 +86,7 @@ def return_server_by_uuid(context, server_uuid):
def return_server_nonexistent(context, server_id):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
@@ -108,7 +109,7 @@ class ServerMetaDataTest(test.TestCase):
fake_change_instance_metadata)
self.controller = server_metadata.Controller()
- self.uuid = str(utils.gen_uuid())
+ self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def test_index(self):
@@ -252,6 +253,22 @@ class ServerMetaDataTest(test.TestCase):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.uuid, body)
+ def test_update_metadata(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ expected = {
+ 'metadata': {
+ 'key1': 'updatedvalue',
+ 'key29': 'newkey',
+ }
+ }
+ req.body = jsonutils.dumps(expected)
+ response = self.controller.update_all(req, self.uuid, expected)
+ self.assertEqual(expected, response)
+
def test_update_all(self):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
@@ -416,7 +433,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
@@ -455,7 +472,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
@@ -469,7 +486,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index afa181ee3..2567558ab 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -19,6 +19,7 @@
import base64
import datetime
import urlparse
+import uuid
import iso8601
from lxml import etree
@@ -38,19 +39,24 @@ from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
-from nova import flags
from nova.network import manager
from nova.network.quantumv2 import api as quantum_api
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
+from nova.openstack.common import policy as common_policy
from nova.openstack.common import rpc
+from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
from nova.tests.image import fake
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+CONF.import_opt('scheduler_topic', 'nova.scheduler.rpcapi')
+
FAKE_UUID = fakes.FAKE_UUID
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -71,6 +77,10 @@ def return_servers_by_reservation(context, reservation_id=""):
reservation_id=reservation_id) for i in xrange(5)]
+def return_servers_empty(context, *args, **kwargs):
+ return []
+
+
def return_servers_by_reservation_empty(context, reservation_id=""):
return []
@@ -166,7 +176,8 @@ class ServersControllerTest(test.TestCase):
self.ext_mgr.extensions = {}
self.controller = servers.Controller(self.ext_mgr)
self.ips_controller = ips.Controller()
-
+ policy.reset()
+ policy.init()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -236,7 +247,7 @@ class ServersControllerTest(test.TestCase):
"""Create two servers with the same host and different
project_ids and check that the hostId's are unique"""
def return_instance_with_host(self, *args):
- project_id = str(utils.gen_uuid())
+ project_id = str(uuid.uuid4())
return fakes.stub_instance(id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
@@ -315,7 +326,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/fake/images/10"
@@ -381,7 +392,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
@@ -450,7 +461,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
@@ -501,7 +512,7 @@ class ServersControllerTest(test.TestCase):
],
},
}
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_get_server_addresses_nonexistent_network(self):
url = '/v2/fake/servers/%s/ips/network_0' % FAKE_UUID
@@ -511,15 +522,25 @@ class ServersControllerTest(test.TestCase):
def test_get_server_addresses_nonexistent_server(self):
def fake_instance_get(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- server_id = str(utils.gen_uuid())
+ server_id = str(uuid.uuid4())
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
+ def test_get_server_list_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ res_dict = self.controller.index(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
def test_get_server_list_with_reservation_id(self):
self.stubs.Set(db, 'instance_get_all_by_reservation',
return_servers_by_reservation)
@@ -596,13 +617,23 @@ class ServersControllerTest(test.TestCase):
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
- self.assertDictMatch(expected_params, params)
+ self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
+ def test_get_server_details_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
+ res_dict = self.controller.index(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
def test_get_server_details_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail?limit=3')
res = self.controller.detail(req)
@@ -618,7 +649,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
- self.assertDictMatch(expected, params)
+ self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail?limit=aaa')
@@ -640,9 +671,9 @@ class ServersControllerTest(test.TestCase):
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
-
- self.assertDictMatch({'limit': ['3'], 'blah': ['2:t'],
- 'marker': [fakes.get_fake_uuid(2)]}, params)
+ expected = {'limit': ['3'], 'blah': ['2:t'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers?limit=30')
@@ -672,7 +703,7 @@ class ServersControllerTest(test.TestCase):
self.controller.index, req)
def test_get_servers_with_bad_option(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -688,7 +719,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -739,7 +770,7 @@ class ServersControllerTest(test.TestCase):
self.assertTrue('servers' in res)
- def test_admin_all_tenants(self):
+ def test_all_tenants_pass_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None):
self.assertNotEqual(filters, None)
@@ -749,29 +780,43 @@ class ServersControllerTest(test.TestCase):
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
- req = fakes.HTTPRequest.blank('/v2/fake/servers?all_tenants=1',
- use_admin_context=True)
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ common_policy.set_rules(common_policy.Rules(rules))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers?all_tenants=1')
res = self.controller.index(req)
self.assertTrue('servers' in res)
- def test_all_tenants(self):
+ def test_all_tenants_fail_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None):
self.assertNotEqual(filters, None)
- self.assertEqual(filters['project_id'], 'fake')
return [fakes.stub_instance(100)]
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:non_fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ common_policy.set_rules(common_policy.Rules(rules))
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequest.blank('/v2/fake/servers?all_tenants=1')
- res = self.controller.index(req)
-
- self.assertTrue('servers' in res)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
def test_get_servers_allows_flavor(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -791,7 +836,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_status(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -810,7 +855,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
- """Test getting servers by invalid status"""
+ # Test getting servers by invalid status.
req = fakes.HTTPRequest.blank('/v2/fake/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
@@ -823,7 +868,7 @@ class ServersControllerTest(test.TestCase):
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -843,7 +888,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_name(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -862,7 +907,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -894,7 +939,7 @@ class ServersControllerTest(test.TestCase):
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -923,7 +968,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -952,7 +997,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by ip with admin_api enabled and
admin context
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -975,7 +1020,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -1163,6 +1208,20 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['accessIPv6'], '')
+ def test_update_server_personality(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {
+ 'server': {
+ 'personality': []
+ }
+ }
+ req.body = jsonutils.dumps(body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, FAKE_UUID, body)
+
def test_update_server_adminPass_ignored(self):
inst_dict = dict(name='server_test', adminPass='bacon')
body = dict(server=inst_dict)
@@ -1191,7 +1250,7 @@ class ServersControllerTest(test.TestCase):
def test_update_server_not_found(self):
def fake_get(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
@@ -1204,7 +1263,7 @@ class ServersControllerTest(test.TestCase):
def test_update_server_not_found_on_update(self):
def fake_update(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'update', fake_update)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
@@ -1499,9 +1558,9 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % (i + 1))
- def test_delete_server_instance(self):
+ def _delete_server_instance(self, uuid=FAKE_UUID):
fakes.stub_out_instance_quota(self.stubs, 0, 10)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
req.method = 'DELETE'
self.server_delete_called = False
@@ -1513,10 +1572,17 @@ class ServersControllerTest(test.TestCase):
self.server_delete_called = True
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
- self.controller.delete(req, FAKE_UUID)
+ self.controller.delete(req, uuid)
+ def test_delete_server_instance(self):
+ self._delete_server_instance()
self.assertEqual(self.server_delete_called, True)
+ def test_delete_server_instance_not_found(self):
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self._delete_server_instance,
+ uuid='non-existent-uuid')
+
def test_delete_server_instance_while_building(self):
fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
@@ -1620,7 +1686,7 @@ class ServerStatusTest(test.TestCase):
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
- """Shared implementation for tests below that create instance"""
+ """Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
@@ -1669,8 +1735,8 @@ class ServersControllerCreateTest(test.TestCase):
return self.instance_cache_by_id[instance_id]
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
- if (topic == FLAGS.scheduler_topic and
+ """Stub out the scheduler creating the instance entry."""
+ if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
num_instances = request_spec.get('num_instances', 1)
@@ -1698,7 +1764,7 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
@@ -1716,17 +1782,12 @@ class ServersControllerCreateTest(test.TestCase):
fake_method)
def _check_admin_pass_len(self, server_dict):
- """ utility function - check server_dict for adminPass
- length.
-
- """
- self.assertEqual(FLAGS.password_length,
+ """utility function - check server_dict for adminPass length."""
+ self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_pass_missing(self, server_dict):
- """ utility function - check server_dict for absence
- of adminPass
- """
+ """utility function - check server_dict for absence of adminPass."""
self.assertTrue("adminPass" not in server_dict)
def _test_create_instance(self):
@@ -1985,9 +2046,11 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
self._test_create_instance()
- def _test_create_extra(self, params):
+ def _test_create_extra(self, params, no_image=False):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ if no_image:
+ server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
@@ -2088,6 +2151,40 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
+ def test_create_instance_with_volumes_enabled_no_image(self):
+ """
+ Test that the create will fail if there is no image
+ and no bdms supplied in the request
+ """
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {}, no_image=True)
+
+ def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
+ """
+ Test that the create works if there is no image supplied but
+ os-volumes extension is enabled and bdms are supplied
+ """
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ bdm = [{'device_name': 'foo'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params, no_image=True)
+
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
@@ -2704,7 +2801,7 @@ class ServersControllerCreateTest(test.TestCase):
'open': 'stack',
},
'personality': {},
- 'config_drive': True,
+ 'config_drive': "true",
},
}
@@ -2784,7 +2881,6 @@ class ServersControllerCreateTest(test.TestCase):
'open': 'stack',
},
'personality': {},
- 'config_drive': True,
},
}
@@ -3253,7 +3349,7 @@ class TestServerCreateRequestXMLDeserializer(test.TestCase):
],
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_spec_request(self):
image_bookmark_link = ("http://servers.api.openstack.org/1234/"
@@ -3563,6 +3659,24 @@ class TestServerCreateRequestXMLDeserializer(test.TestCase):
}}
self.assertEquals(request['body'], expected)
+ def test_request_with_config_drive(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="config_drive_test"
+ imageRef="1"
+ flavorRef="1"
+ config_drive="true"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "config_drive_test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "config_drive": "true"
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
class TestAddressesXMLSerialization(test.TestCase):
@@ -3698,7 +3812,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.basic(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
@@ -3720,7 +3834,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.basic(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost/fake/images/5"
@@ -3779,7 +3893,12 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_no_image(self):
+ self.instance["image_ref"] = ""
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertEqual(output['server']['image'], "")
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3853,7 +3972,7 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3871,7 +3990,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3890,7 +4010,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.get_admin_context()
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3908,7 +4029,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.get_admin_context()
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
@@ -3989,7 +4111,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv4(self):
@@ -4051,7 +4173,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv6(self):
@@ -4113,7 +4235,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
@@ -4177,7 +4299,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
class ServerXMLSerializationTest(test.TestCase):
@@ -5142,7 +5264,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.app = compute.APIRouter()
def test_create_missing_server(self):
- """Test create with malformed body"""
+ # Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
@@ -5159,7 +5281,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.assertEqual(422, res.status_int)
def test_update_missing_server(self):
- """Test create with malformed body"""
+ # Test create with malformed body.
def fake_update(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
diff --git a/nova/tests/api/openstack/compute/test_urlmap.py b/nova/tests/api/openstack/compute/test_urlmap.py
index 3baa8ad4c..6367a8e5e 100644
--- a/nova/tests/api/openstack/compute/test_urlmap.py
+++ b/nova/tests/api/openstack/compute/test_urlmap.py
@@ -35,7 +35,7 @@ class UrlmapTest(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def test_path_version_v1_1(self):
- """Test URL path specifying v1.1 returns v2 content."""
+ # Test URL path specifying v1.1 returns v2 content.
req = webob.Request.blank('/v1.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -45,7 +45,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v1_1(self):
- """Test Content-Type specifying v1.1 returns v2 content."""
+ # Test Content-Type specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=1.1"
req.accept = "application/json"
@@ -56,7 +56,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v1_1(self):
- """Test Accept header specifying v1.1 returns v2 content."""
+ # Test Accept header specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=1.1"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -66,7 +66,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_version_v2(self):
- """Test URL path specifying v2 returns v2 content."""
+ # Test URL path specifying v2 returns v2 content.
req = webob.Request.blank('/v2/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -76,7 +76,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v2(self):
- """Test Content-Type specifying v2 returns v2 content."""
+ # Test Content-Type specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=2"
req.accept = "application/json"
@@ -87,7 +87,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v2(self):
- """Test Accept header specifying v2 returns v2 content."""
+ # Test Accept header specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=2"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -97,7 +97,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_content_type(self):
- """Test URL path specifying JSON returns JSON content."""
+ # Test URL path specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
req = webob.Request.blank(url)
req.accept = "application/xml"
@@ -109,7 +109,7 @@ class UrlmapTest(test.TestCase):
'cedef40a-ed67-4d10-800e-17455edce175')
def test_accept_content_type(self):
- """Test Accept header specifying JSON returns JSON content."""
+ # Test Accept header specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
req = webob.Request.blank(url)
req.accept = "application/xml;q=0.8, application/json"
diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py
index 4520faa48..28b109215 100644
--- a/nova/tests/api/openstack/compute/test_versions.py
+++ b/nova/tests/api/openstack/compute/test_versions.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid as stdlib_uuid
+
import feedparser
from lxml import etree
import webob
@@ -26,7 +28,7 @@ from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
-from nova import utils
+from nova.tests import matchers
NS = {
@@ -35,17 +37,17 @@ NS = {
}
-LINKS = {
+EXP_LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
+ 'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
- 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
+ 'api/openstack-compute/2/wadl/os-compute-2.wadl',
},
}
-VERSIONS = {
+EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
@@ -54,12 +56,12 @@ VERSIONS = {
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -77,9 +79,6 @@ VERSIONS = {
class VersionsTest(test.TestCase):
- def setUp(self):
- super(VersionsTest, self).setUp()
- self.stubs.Set(versions, 'VERSIONS', VERSIONS)
def test_get_version_list(self):
req = webob.Request.blank('/')
@@ -130,12 +129,12 @@ class VersionsTest(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -174,12 +173,12 @@ class VersionsTest(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -208,7 +207,7 @@ class VersionsTest(test.TestCase):
version = etree.XML(res.body)
xmlutil.validate_schema(version, 'version')
- expected = VERSIONS['v2.0']
+ expected = EXP_VERSIONS['v2.0']
self.assertTrue(version.xpath('/ns:version', namespaces=NS))
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
@@ -238,7 +237,7 @@ class VersionsTest(test.TestCase):
for i, v in enumerate(['v2.0']):
version = versions[i]
- expected = VERSIONS[v]
+ expected = EXP_VERSIONS[v]
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
(link,) = version.xpath('atom:link', namespaces=NS)
@@ -276,11 +275,11 @@ class VersionsTest(test.TestCase):
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
- 'href': LINKS['v2.0']['pdf'],
+ 'href': EXP_LINKS['v2.0']['pdf'],
'type': 'application/pdf',
'rel': 'describedby'})
self.assertEqual(entry.links[2], {
- 'href': LINKS['v2.0']['wadl'],
+ 'href': EXP_LINKS['v2.0']['wadl'],
'type': 'application/vnd.sun.wadl+xml',
'rel': 'describedby'})
@@ -346,7 +345,8 @@ class VersionsTest(test.TestCase):
},
], }
- self.assertDictMatch(expected, jsonutils.loads(res.body))
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
def test_multi_choice_image_xml(self):
req = webob.Request.blank('/images/1')
@@ -365,8 +365,11 @@ class VersionsTest(test.TestCase):
self.assertEqual(version.get('status'), 'CURRENT')
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
- self.assertTrue(common.compare_media_types(media_types,
- VERSIONS['v2.0']['media-types']))
+ self.assertTrue(common.
+ compare_media_types(media_types,
+ EXP_VERSIONS['v2.0']['media-types']
+ ))
+
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
@@ -383,7 +386,7 @@ class VersionsTest(test.TestCase):
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
@@ -416,7 +419,8 @@ class VersionsTest(test.TestCase):
},
], }
- self.assertDictMatch(expected, jsonutils.loads(res.body))
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.TestCase):
@@ -508,7 +512,7 @@ class VersionsSerializerTests(test.TestCase):
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
- "media-types": VERSIONS['v2.0']['media-types'],
+ "media-types": EXP_VERSIONS['v2.0']['media-types'],
"links": [
{
"rel": "self",
@@ -597,12 +601,12 @@ class VersionsSerializerTests(test.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": LINKS['v2.0']['pdf'],
+ "href": EXP_LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": LINKS['v2.0']['wadl'],
+ "href": EXP_LINKS['v2.0']['wadl'],
},
],
"media-types": [
@@ -647,9 +651,9 @@ class VersionsSerializerTests(test.TestCase):
self.assertEqual(entry.links[1], {
'rel': 'describedby',
'type': 'application/pdf',
- 'href': LINKS['v2.0']['pdf']})
+ 'href': EXP_LINKS['v2.0']['pdf']})
self.assertEqual(entry.links[2], {
'rel': 'describedby',
'type': 'application/vnd.sun.wadl+xml',
- 'href': LINKS['v2.0']['wadl'],
+ 'href': EXP_LINKS['v2.0']['wadl'],
})
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 4f39e569e..03fc87ac5 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -16,6 +16,7 @@
# under the License.
import datetime
+import uuid
import glanceclient.v1.images
import routes
@@ -44,7 +45,6 @@ from nova.openstack.common import timeutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
-from nova import utils
from nova import wsgi
@@ -151,7 +151,7 @@ def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
- stubs.Set(nova.flags, '_get_my_ip', get_my_ip)
+ stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
@@ -373,7 +373,7 @@ def create_info_cache(nw_cache):
def get_fake_uuid(token=0):
if not token in FAKE_UUIDS:
- FAKE_UUIDS[token] = str(utils.gen_uuid())
+ FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
@@ -403,7 +403,7 @@ def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
found_marker = True
servers_list = []
if not marker is None and not found_marker:
- raise exc.MarkerNotFound(marker)
+ raise exc.MarkerNotFound(marker=marker)
if not limit is None:
servers_list = servers_list[:limit]
return servers_list
@@ -473,7 +473,7 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
"vcpus": 0,
"root_gb": 0,
"ephemeral_gb": 0,
- "hostname": "",
+ "hostname": display_name or server_name,
"host": host,
"instance_type_id": 1,
"instance_type": dict(inst_type),
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 4ebd49ca2..7e49e4ab8 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -43,7 +43,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """ Run before each test. """
+ """Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -51,7 +51,7 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """ Test offset key works with 0. """
+ # Test offset key works with 0.
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -59,7 +59,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """ Test offset key works with a medium sized number. """
+ # Test offset key works with a medium sized number.
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
@@ -67,7 +67,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """ Test offset key works with a number over 1000 (max_limit). """
+ # Test offset key works with a number over 1000 (max_limit).
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
@@ -76,19 +76,19 @@ class LimiterTest(test.TestCase):
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """ Test offset key works with a blank offset. """
+ # Test offset key works with a blank offset.
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """ Test offset key works with a BAD offset. """
+ # Test offset key works with a BAD offset.
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """ Test request with no offset or limit """
+ # Test request with no offset or limit.
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -96,7 +96,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """ Test limit of zero. """
+ # Test limit of zero.
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -104,7 +104,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """ Test limit of 10. """
+ # Test limit of 10.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -112,7 +112,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """ Test limit of 3000. """
+ # Test limit of 3000.
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -120,7 +120,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """ Test request with both limit and offset. """
+ # Test request with both limit and offset.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
@@ -132,7 +132,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """ Test a max_limit other than 1000. """
+ # Test a max_limit other than 1000.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
@@ -147,13 +147,13 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """ Test a negative limit. """
+ # Test a negative limit.
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """ Test a negative offset. """
+ # Test a negative offset.
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
@@ -167,30 +167,30 @@ class PaginationParamsTest(test.TestCase):
"""
def test_no_params(self):
- """ Test no params. """
+ # Test no params.
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
- """ Test valid marker param. """
+ # Test valid marker param.
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
- """ Test valid limit param. """
+ # Test valid limit param.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
- """ Test invalid limit param. """
+ # Test invalid limit param.
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
- """ Test valid limit and marker parameters. """
+ # Test valid limit and marker parameters.
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
@@ -284,9 +284,9 @@ class MiscFunctionsTest(test.TestCase):
self.assertEqual(actual, expected)
def test_raise_http_conflict_for_instance_invalid_state(self):
- # Correct args
exc = exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method')
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
try:
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow')
@@ -296,17 +296,6 @@ class MiscFunctionsTest(test.TestCase):
else:
self.fail("webob.exc.HTTPConflict was not raised")
- # Incorrect args
- exc = exception.InstanceInvalidState()
- try:
- common.raise_http_conflict_for_instance_invalid_state(exc,
- 'meow')
- except webob.exc.HTTPConflict as e:
- self.assertEqual(unicode(e),
- "Instance is in an invalid state for 'meow'")
- else:
- self.fail("webob.exc.HTTPConflict was not raised")
-
def test_check_img_metadata_properties_quota_valid_metadata(self):
ctxt = test_utils.get_test_admin_context()
metadata1 = {"key": "value"}
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 1bd799f8c..a413f9c4d 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -38,7 +38,7 @@ class TestFaults(test.TestCase):
return xml_string
def test_400_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -60,7 +60,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_413_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -85,7 +85,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_raise(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
@@ -97,7 +97,7 @@ class TestFaults(test.TestCase):
self.assertTrue('whut?' in resp.body)
def test_raise_403(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
@@ -110,12 +110,12 @@ class TestFaults(test.TestCase):
self.assertTrue('forbidden' in resp.body)
def test_fault_has_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(fault.status_int, 400)
def test_xml_serializer(self):
- """Ensure that a v1.1 request responds with a v1.1 xmlns"""
+ # Ensure that a v1.1 request responds with a v1.1 xmlns.
request = webob.Request.blank('/v1.1',
headers={"Accept": "application/xml"})
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 387940fc2..a18dc78d5 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -196,7 +196,7 @@ class XMLDeserializerTest(test.TestCase):
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
- xml = """<a></a>"""
+ xml = '<a></a>'
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
@@ -753,7 +753,7 @@ class ResourceTest(test.TestCase):
self.assertEqual(response, 'foo')
def test_resource_exception_handler_type_error(self):
- """A TypeError should be translated to a Fault/HTTP 400"""
+ # A TypeError should be translated to a Fault/HTTP 400.
def foo(a,):
return a
diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py
index 38306068a..083e6c0e9 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/api/test_auth.py
@@ -93,7 +93,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.roles = "pawn, knight, rook"
def test_roles(self):
- """Test that the newer style role header takes precedence"""
+ # Test that the newer style role header takes precedence.
self.request.headers['X_ROLES'] = 'pawn,knight,rook'
self.request.headers['X_ROLE'] = 'bad'
@@ -106,7 +106,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_deprecated_role(self):
- """Test fallback to older role header"""
+ # Test fallback to older role header.
self.request.headers['X_ROLE'] = 'pawn,knight,rook'
response = self.request.get_response(self.middleware)
@@ -118,7 +118,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_no_role_headers(self):
- """Test with no role headers set"""
+ # Test with no role headers set.
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
diff --git a/nova/tests/api/test_sizelimit.py b/nova/tests/api/test_sizelimit.py
index 170198b6b..9c5ab1a27 100644
--- a/nova/tests/api/test_sizelimit.py
+++ b/nova/tests/api/test_sizelimit.py
@@ -15,11 +15,11 @@
import webob
import nova.api.sizelimit
-from nova import flags
+from nova.openstack.common import cfg
from nova import test
-FLAGS = flags.FLAGS
-MAX_REQUEST_BODY_SIZE = FLAGS.osapi_max_request_body_size
+CONF = cfg.CONF
+MAX_REQUEST_BODY_SIZE = CONF.osapi_max_request_body_size
class TestRequestBodySizeLimiter(test.TestCase):
diff --git a/nova/tests/baremetal/__init__.py b/nova/tests/baremetal/__init__.py
index e69de29bb..f15d84efc 100644
--- a/nova/tests/baremetal/__init__.py
+++ b/nova/tests/baremetal/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from nova.tests.baremetal import *
diff --git a/nova/tests/baremetal/db/__init__.py b/nova/tests/baremetal/db/__init__.py
new file mode 100644
index 000000000..543dfc1ae
--- /dev/null
+++ b/nova/tests/baremetal/db/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from nova.tests.baremetal.db import *
diff --git a/nova/tests/baremetal/db/base.py b/nova/tests/baremetal/db/base.py
new file mode 100644
index 000000000..37e51fe79
--- /dev/null
+++ b/nova/tests/baremetal/db/base.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Bare-metal DB test base class."""
+
+from nova import context as nova_context
+from nova.openstack.common import cfg
+from nova import test
+from nova.virt.baremetal.db import migration as bm_migration
+from nova.virt.baremetal.db.sqlalchemy import session as bm_session
+
+_DB_CACHE = None
+
+CONF = cfg.CONF
+CONF.import_opt('sql_connection',
+ 'nova.virt.baremetal.db.sqlalchemy.session',
+ group='baremetal')
+
+
+class Database(test.Database):
+
+ def post_migrations(self):
+ pass
+
+
+class BMDBTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BMDBTestCase, self).setUp()
+ self.flags(sql_connection='sqlite://', group='baremetal')
+ global _DB_CACHE
+ if not _DB_CACHE:
+ _DB_CACHE = Database(bm_session, bm_migration,
+ sql_connection=CONF.baremetal.sql_connection,
+ sqlite_db=None,
+ sqlite_clean_db=None)
+ self.useFixture(_DB_CACHE)
+ self.context = nova_context.get_admin_context()
diff --git a/nova/tests/baremetal/db/test_bm_interface.py b/nova/tests/baremetal/db/test_bm_interface.py
new file mode 100644
index 000000000..9f051ac9b
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_interface.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-metal DB testcase for BareMetalInterface
+"""
+
+from nova import exception
+from nova.tests.baremetal.db import base
+from nova.virt.baremetal import db
+
+
+class BareMetalInterfaceTestCase(base.BMDBTestCase):
+
+ def test_unique_address(self):
+ pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
+ '0x1', 1)
+ self.assertRaises(exception.DBError,
+ db.bm_interface_create,
+ self.context, 2, '11:11:11:11:11:11', '0x2', 2)
+ # succeeds after deleting pif1
+ db.bm_interface_destroy(self.context, pif1_id)
+ pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
+ '0x2', 2)
+ self.assertTrue(pif2_id is not None)
+
+ def test_unique_vif_uuid(self):
+ pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
+ '0x1', 1)
+ pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
+ '0x2', 2)
+ db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
+ self.assertRaises(exception.NovaException,
+ db.bm_interface_set_vif_uuid,
+ self.context, pif2_id, 'AAAA')
+
+ def test_vif_not_found(self):
+ pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
+ '0x1', 1)
+ self.assertRaises(exception.NovaException,
+ db.bm_interface_set_vif_uuid,
+ self.context, pif_id + 1, 'AAAA')
diff --git a/nova/tests/baremetal/db/test_bm_node.py b/nova/tests/baremetal/db/test_bm_node.py
new file mode 100644
index 000000000..8cac38378
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_node.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-Metal DB testcase for BareMetalNode
+"""
+
+from nova import exception
+from nova.tests.baremetal.db import base
+from nova.tests.baremetal.db import utils
+from nova.virt.baremetal import db
+
+
+class BareMetalNodesTestCase(base.BMDBTestCase):
+
+ def _create_nodes(self):
+ nodes = [
+ utils.new_bm_node(pm_address='0', service_host="host1",
+ memory_mb=100000, cpus=100, local_gb=10000),
+ utils.new_bm_node(pm_address='1', service_host="host2",
+ instance_uuid='A',
+ memory_mb=100000, cpus=100, local_gb=10000),
+ utils.new_bm_node(pm_address='2', service_host="host2",
+ memory_mb=1000, cpus=1, local_gb=1000),
+ utils.new_bm_node(pm_address='3', service_host="host2",
+ memory_mb=1000, cpus=2, local_gb=1000),
+ utils.new_bm_node(pm_address='4', service_host="host2",
+ memory_mb=2000, cpus=1, local_gb=1000),
+ utils.new_bm_node(pm_address='5', service_host="host2",
+ memory_mb=2000, cpus=2, local_gb=1000),
+ ]
+ self.ids = []
+ for n in nodes:
+ ref = db.bm_node_create(self.context, n)
+ self.ids.append(ref['id'])
+
+ def test_get_all0(self):
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(r, [])
+
+ def test_get_all(self):
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(r, [])
+
+ self._create_nodes()
+
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(len(r), 6)
+
+ def test_get(self):
+ self._create_nodes()
+
+ r = db.bm_node_get(self.context, self.ids[0])
+ self.assertEquals(r['pm_address'], '0')
+
+ r = db.bm_node_get(self.context, self.ids[1])
+ self.assertEquals(r['pm_address'], '1')
+
+ self.assertRaises(
+ exception.InstanceNotFound,
+ db.bm_node_get,
+ self.context, -1)
+
+ def test_get_by_service_host(self):
+ self._create_nodes()
+
+ r = db.bm_node_get_all(self.context, service_host=None)
+ self.assertEquals(len(r), 6)
+
+ r = db.bm_node_get_all(self.context, service_host="host1")
+ self.assertEquals(len(r), 1)
+ self.assertEquals(r[0]['pm_address'], '0')
+
+ r = db.bm_node_get_all(self.context, service_host="host2")
+ self.assertEquals(len(r), 5)
+ pmaddrs = [x['pm_address'] for x in r]
+ self.assertIn('1', pmaddrs)
+ self.assertIn('2', pmaddrs)
+ self.assertIn('3', pmaddrs)
+ self.assertIn('4', pmaddrs)
+ self.assertIn('5', pmaddrs)
+
+ r = db.bm_node_get_all(self.context, service_host="host3")
+ self.assertEquals(r, [])
+
+ def test_destroy(self):
+ self._create_nodes()
+
+ db.bm_node_destroy(self.context, self.ids[0])
+
+ self.assertRaises(
+ exception.InstanceNotFound,
+ db.bm_node_get,
+ self.context, self.ids[0])
+
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(len(r), 5)
+
+ def test_find_free(self):
+ self._create_nodes()
+ fn = db.bm_node_find_free(self.context, 'host2')
+ self.assertEqual(fn['pm_address'], '2')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=500, cpus=2, local_gb=100)
+ self.assertEqual(fn['pm_address'], '3')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=1001, cpus=1, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '4')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=1, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '4')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=2, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '5')
+
+ # check memory_mb
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2001, cpus=2, local_gb=1000)
+ self.assertTrue(fn is None)
+
+ # check cpus
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=3, local_gb=1000)
+ self.assertTrue(fn is None)
+
+ # check local_gb
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=2, local_gb=1001)
+ self.assertTrue(fn is None)
diff --git a/nova/tests/baremetal/db/test_bm_pxe_ip.py b/nova/tests/baremetal/db/test_bm_pxe_ip.py
new file mode 100644
index 000000000..9a93b46ad
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_pxe_ip.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-metal DB testcase for BareMetalPxeIp
+"""
+
+from nova import exception
+from nova.tests.baremetal.db import base
+from nova.tests.baremetal.db import utils
+from nova.virt.baremetal import db
+
+
+class BareMetalPxeIpTestCase(base.BMDBTestCase):
+
+ def _create_pxe_ip(self):
+ i1 = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.101')
+ i2 = utils.new_bm_pxe_ip(address='10.1.1.2',
+ server_address='10.1.1.102')
+
+ i1_ref = db.bm_pxe_ip_create_direct(self.context, i1)
+ self.assertTrue(i1_ref['id'] is not None)
+ self.assertEqual(i1_ref['address'], '10.1.1.1')
+ self.assertEqual(i1_ref['server_address'], '10.1.1.101')
+
+ i2_ref = db.bm_pxe_ip_create_direct(self.context, i2)
+ self.assertTrue(i2_ref['id'] is not None)
+ self.assertEqual(i2_ref['address'], '10.1.1.2')
+ self.assertEqual(i2_ref['server_address'], '10.1.1.102')
+
+ self.i1 = i1_ref
+ self.i2 = i2_ref
+
+ def test_unuque_address(self):
+ self._create_pxe_ip()
+
+ # address duplicates an existing row
+ i = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.201')
+ self.assertRaises(exception.DBError,
+ db.bm_pxe_ip_create_direct,
+ self.context, i)
+
+ # server_address duplicates
+ i = utils.new_bm_pxe_ip(address='10.1.1.3',
+ server_address='10.1.1.101')
+ self.assertRaises(exception.DBError,
+ db.bm_pxe_ip_create_direct,
+ self.context, i)
+
+ db.bm_pxe_ip_destroy(self.context, self.i1['id'])
+ i = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.101')
+ ref = db.bm_pxe_ip_create_direct(self.context, i)
+ self.assertTrue(ref is not None)
+
+ def test_bm_pxe_ip_associate(self):
+ self._create_pxe_ip()
+ node = db.bm_node_create(self.context, utils.new_bm_node())
+ ip_id = db.bm_pxe_ip_associate(self.context, node['id'])
+ ref = db.bm_pxe_ip_get(self.context, ip_id)
+ self.assertEqual(ref['bm_node_id'], node['id'])
+
+ def test_bm_pxe_ip_associate_raise(self):
+ self._create_pxe_ip()
+ node_id = 123
+ self.assertRaises(exception.NovaException,
+ db.bm_pxe_ip_associate,
+ self.context, node_id)
+
+ def test_delete_by_address(self):
+ self._create_pxe_ip()
+ db.bm_pxe_ip_destroy_by_address(self.context, '10.1.1.1')
+ del_ref = db.bm_pxe_ip_get(self.context, self.i1['id'])
+ self.assertTrue(del_ref is None)
+
+ def test_delete_by_address_not_exist(self):
+ self._create_pxe_ip()
+ del_ref = db.bm_pxe_ip_destroy_by_address(self.context, '10.11.12.13')
+ self.assertTrue(del_ref is None)
diff --git a/nova/tests/baremetal/db/utils.py b/nova/tests/baremetal/db/utils.py
new file mode 100644
index 000000000..800305402
--- /dev/null
+++ b/nova/tests/baremetal/db/utils.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Bare-metal test utils."""
+
+from nova import test
+from nova.virt.baremetal.db.sqlalchemy import models as bm_models
+
+
+def new_bm_node(**kwargs):
+ h = bm_models.BareMetalNode()
+ h.id = kwargs.pop('id', None)
+ h.service_host = kwargs.pop('service_host', None)
+ h.instance_uuid = kwargs.pop('instance_uuid', None)
+ h.cpus = kwargs.pop('cpus', 1)
+ h.memory_mb = kwargs.pop('memory_mb', 1024)
+ h.local_gb = kwargs.pop('local_gb', 64)
+ h.pm_address = kwargs.pop('pm_address', '192.168.1.1')
+ h.pm_user = kwargs.pop('pm_user', 'ipmi_user')
+ h.pm_password = kwargs.pop('pm_password', 'ipmi_password')
+ h.prov_mac_address = kwargs.pop('prov_mac_address', '12:34:56:78:90:ab')
+ h.registration_status = kwargs.pop('registration_status', 'done')
+ h.task_state = kwargs.pop('task_state', None)
+ h.prov_vlan_id = kwargs.pop('prov_vlan_id', None)
+ h.terminal_port = kwargs.pop('terminal_port', 8000)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return h
+
+
+def new_bm_pxe_ip(**kwargs):
+ x = bm_models.BareMetalPxeIp()
+ x.id = kwargs.pop('id', None)
+ x.address = kwargs.pop('address', None)
+ x.server_address = kwargs.pop('server_address', None)
+ x.bm_node_id = kwargs.pop('bm_node_id', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
+
+
+def new_bm_interface(**kwargs):
+ x = bm_models.BareMetalInterface()
+ x.id = kwargs.pop('id', None)
+ x.bm_node_id = kwargs.pop('bm_node_id', None)
+ x.address = kwargs.pop('address', None)
+ x.datapath_id = kwargs.pop('datapath_id', None)
+ x.port_no = kwargs.pop('port_no', None)
+ x.vif_uuid = kwargs.pop('vif_uuid', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
+
+
+def new_bm_deployment(**kwargs):
+ x = bm_models.BareMetalDeployment()
+ x.id = kwargs.pop('id', None)
+ x.key = kwargs.pop('key', None)
+ x.image_path = kwargs.pop('image_path', None)
+ x.pxe_config_path = kwargs.pop('pxe_config_path', None)
+ x.root_mb = kwargs.pop('root_mb', None)
+ x.swap_mb = kwargs.pop('swap_mb', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
new file mode 100644
index 000000000..37ef71881
--- /dev/null
+++ b/nova/tests/baremetal/test_driver.py
@@ -0,0 +1,185 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 University of Southern California / ISI
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the base baremetal driver class."""
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova.tests.image import fake as fake_image
+from nova.tests import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import db
+from nova.virt.baremetal import driver as bm_driver
+from nova.virt.baremetal import fake
+
+
+CONF = cfg.CONF
+
+COMMON_FLAGS = dict(
+ firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
+ host='test_host',
+)
+
+BAREMETAL_FLAGS = dict(
+ driver='nova.virt.baremetal.fake.FakeDriver',
+ instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
+ power_manager='nova.virt.baremetal.fake.FakePowerManager',
+ vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
+ volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
+ group='baremetal',
+)
+
+
+class BareMetalDriverNoDBTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BareMetalDriverNoDBTestCase, self).setUp()
+ self.flags(**COMMON_FLAGS)
+ self.flags(**BAREMETAL_FLAGS)
+ self.driver = bm_driver.BareMetalDriver(None)
+
+ def test_validate_driver_loading(self):
+ self.assertTrue(isinstance(self.driver.driver,
+ fake.FakeDriver))
+ self.assertTrue(isinstance(self.driver.vif_driver,
+ fake.FakeVifDriver))
+ self.assertTrue(isinstance(self.driver.volume_driver,
+ fake.FakeVolumeDriver))
+ self.assertTrue(isinstance(self.driver.firewall_driver,
+ fake.FakeFirewallDriver))
+
+
+class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
+
+ def setUp(self):
+ super(BareMetalDriverWithDBTestCase, self).setUp()
+ self.flags(**COMMON_FLAGS)
+ self.flags(**BAREMETAL_FLAGS)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.context = utils.get_test_admin_context()
+ self.driver = bm_driver.BareMetalDriver(None)
+ self.node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ )
+ self.nic_info = [
+ {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+ self.addCleanup(fake_image.FakeImageService_reset)
+
+ def _create_node(self):
+ self.node = db.bm_node_create(self.context, self.node_info)
+ for nic in self.nic_info:
+ db.bm_interface_create(
+ self.context,
+ self.node['id'],
+ nic['address'],
+ nic['datapath_id'],
+ nic['port_no'],
+ )
+ self.test_instance = utils.get_test_instance()
+ self.test_instance['node'] = self.node['id']
+ self.spawn_params = dict(
+ admin_password='test_pass',
+ block_device_info=None,
+ context=self.context,
+ image_meta=utils.get_test_image_info(None,
+ self.test_instance),
+ injected_files=[('/fake/path', 'hello world')],
+ instance=self.test_instance,
+ network_info=utils.get_test_network_info(),
+ )
+
+ def test_get_host_stats(self):
+ self._create_node()
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 1)
+ stats = stats[0]
+ self.assertEqual(stats['cpu_arch'], 'test')
+ self.assertEqual(stats['test_spec'], 'test_value')
+ self.assertEqual(stats['hypervisor_type'], 'baremetal')
+ self.assertEqual(stats['hypervisor_hostname'], '123')
+ self.assertEqual(stats['host'], 'test_host')
+ self.assertEqual(stats['vcpus'], 2)
+ self.assertEqual(stats['host_memory_total'], 2048)
+
+ def test_spawn_ok(self):
+ self._create_node()
+ self.driver.spawn(**self.spawn_params)
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+
+ def test_macs_for_instance(self):
+ self._create_node()
+ expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(self.test_instance))
+
+ def test_macs_for_instance_no_interfaces(self):
+ # Nodes cannot boot with no MACs, so we raise an error if that happens.
+ self.nic_info = []
+ self._create_node()
+ self.assertRaises(exception.NovaException,
+ self.driver.macs_for_instance, self.test_instance)
+
+ def test_spawn_node_in_use(self):
+ self._create_node()
+ db.bm_node_update(self.context, self.node['id'],
+ {'instance_uuid': '1234-5678'})
+
+ self.assertRaises(exception.NovaException,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], None)
+
+ def test_spawn_node_not_found(self):
+ self._create_node()
+ db.bm_node_update(self.context, self.node['id'],
+ {'id': 9876})
+
+ self.assertRaises(exception.NovaException,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, 9876)
+ self.assertEqual(row['task_state'], None)
+
+ def test_spawn_fails(self):
+ self._create_node()
+
+ self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
+ fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], baremetal_states.ERROR)
diff --git a/nova/tests/baremetal/test_ipmi.py b/nova/tests/baremetal/test_ipmi.py
new file mode 100644
index 000000000..87800cb47
--- /dev/null
+++ b/nova/tests/baremetal/test_ipmi.py
@@ -0,0 +1,222 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test class for baremetal IPMI power manager."""
+
+import os
+import stat
+import tempfile
+
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import ipmi
+from nova.virt.baremetal import utils as bm_utils
+
+CONF = cfg.CONF
+
+
+class BareMetalIPMITestCase(test.TestCase):
+
+ def setUp(self):
+ super(BareMetalIPMITestCase, self).setUp()
+ self.node = bm_db_utils.new_bm_node(
+ id=123,
+ pm_address='fake-address',
+ pm_user='fake-user',
+ pm_password='fake-password')
+ self.ipmi = ipmi.IPMI(self.node)
+
+ def test_construct(self):
+ self.assertEqual(self.ipmi.node_id, 123)
+ self.assertEqual(self.ipmi.address, 'fake-address')
+ self.assertEqual(self.ipmi.user, 'fake-user')
+ self.assertEqual(self.ipmi.password, 'fake-password')
+
+ def test_make_password_file(self):
+ pw_file = ipmi._make_password_file(self.node['pm_password'])
+ try:
+ self.assertTrue(os.path.isfile(pw_file))
+ self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0777, 0600)
+ with open(pw_file, "r") as f:
+ pm_password = f.read()
+ self.assertEqual(pm_password, self.node['pm_password'])
+ finally:
+ os.unlink(pw_file)
+
+ def test_exec_ipmitool(self):
+ pw_file = '/tmp/password_file'
+
+ self.mox.StubOutWithMock(ipmi, '_make_password_file')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ ipmi._make_password_file(self.ipmi.password).AndReturn(pw_file)
+ args = [
+ 'ipmitool',
+ '-I', 'lanplus',
+ '-H', self.ipmi.address,
+ '-U', self.ipmi.user,
+ '-f', pw_file,
+ 'A', 'B', 'C',
+ ]
+ utils.execute(*args, attempts=3).AndReturn(('', ''))
+ bm_utils.unlink_without_raise(pw_file).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.ipmi._exec_ipmitool('A B C')
+ self.mox.VerifyAll()
+
+ def test_is_power(self):
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi._is_power("on")
+ self.mox.VerifyAll()
+
+ def test_power_already_on(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
+
+ def test_power_on_ok(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
+
+ def test_power_on_fail(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
+
+ def test_power_on_max_retries(self):
+ self.flags(ipmi_power_retry=2, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
+ self.assertEqual(self.ipmi.retries, 3)
+
+ def test_power_off_ok(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.ipmi._exec_ipmitool("power off").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.ACTIVE
+ self.ipmi._power_off()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.DELETED)
+
+ def test_get_console_pid_path(self):
+ self.flags(terminal_pid_dir='/tmp', group='baremetal')
+ path = ipmi._get_console_pid_path(self.ipmi.node_id)
+ self.assertEqual(path, '/tmp/%s.pid' % self.ipmi.node_id)
+
+ def test_console_pid(self):
+ fd, path = tempfile.mkstemp()
+ with os.fdopen(fd, 'w') as f:
+ f.write("12345\n")
+
+ self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
+ ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ bm_utils.unlink_without_raise(path)
+ self.mox.VerifyAll()
+ self.assertEqual(pid, 12345)
+
+ def test_console_pid_nan(self):
+ fd, path = tempfile.mkstemp()
+ with os.fdopen(fd, 'w') as f:
+ f.write("hello world\n")
+
+ self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
+ ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ bm_utils.unlink_without_raise(path)
+ self.mox.VerifyAll()
+ self.assertTrue(pid is None)
+
+ def test_console_pid_file_not_found(self):
+ pid_path = ipmi._get_console_pid_path(self.ipmi.node_id)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.path.exists(pid_path).AndReturn(False)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ self.mox.VerifyAll()
+ self.assertTrue(pid is None)
diff --git a/nova/tests/baremetal/test_nova_baremetal_manage.py b/nova/tests/baremetal/test_nova_baremetal_manage.py
new file mode 100644
index 000000000..4d152a028
--- /dev/null
+++ b/nova/tests/baremetal/test_nova_baremetal_manage.py
@@ -0,0 +1,49 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2011 OpenStack LLC
+# Copyright 2011 Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import os
+import sys
+
+from nova import context
+from nova import test
+from nova.virt.baremetal import db as bmdb
+
+from nova.tests.baremetal.db import base as bm_db_base
+
+TOPDIR = os.path.normpath(os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+BM_MAN_PATH = os.path.join(TOPDIR, 'bin', 'nova-baremetal-manage')
+
+sys.dont_write_bytecode = True
+bm_man = imp.load_source('bm_man', BM_MAN_PATH)
+sys.dont_write_bytecode = False
+
+
+class BareMetalDbCommandsTestCase(bm_db_base.BMDBTestCase):
+ def setUp(self):
+ super(BareMetalDbCommandsTestCase, self).setUp()
+ self.commands = bm_man.BareMetalDbCommands()
+
+ def test_sync_and_version(self):
+ self.commands.sync()
+ v = self.commands.version()
+ self.assertTrue(v > 0)
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
deleted file mode 100644
index e9184ee5d..000000000
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 University of Southern California
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import __builtin__
-
-import functools
-import mox
-import StringIO
-
-from nova.compute import power_state
-from nova import exception
-from nova import flags
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests import fake_utils
-
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import driver
-
-
-FLAGS = flags.FLAGS
-
-
-# Same fake_domains is used by different classes,
-# but different fake_file is used by different classes for unit test.
-fake_domains = [{'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}]
-
-
-class DomainReadWriteTestCase(test.TestCase):
-
- def setUp(self):
- super(DomainReadWriteTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
-
- def test_read_domain_with_empty_list(self):
- """Read a file that contains no domains"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- fake_file = StringIO.StringIO('[]')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, [])
-
- def test_read_domain(self):
- """Read a file that contains at least one domain"""
- fake_file = StringIO.StringIO('''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]''')
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, fake_domains)
-
- def test_read_no_file(self):
- """Try to read when the file does not exist
-
- This should through and IO exception"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file',
- 'r').AndRaise(IOError(2, 'No such file or directory',
- '/tftpboot/test_fake_dom_file'))
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NotFound, dom.read_domains,
- '/tftpboot/test_fake_dom_file')
-
- def assertJSONEquals(self, x, y):
- """Check if two json strings represent the equivalent Python object"""
- self.assertEquals(jsonutils.loads(x), jsonutils.loads(y))
- return jsonutils.loads(x) == jsonutils.loads(y)
-
- def test_write_domain(self):
- """Write the domain to file"""
- self.mox.StubOutWithMock(__builtin__, 'open')
- mock_file = self.mox.CreateMock(file)
- expected_json = '''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]'''
- open('/tftpboot/test_fake_dom_file', 'w').AndReturn(mock_file)
-
- # Check if the argument to file.write() represents the same
- # Python object as expected_json
- # We can't do an exact string comparison
- # because of ordering and whitespace
- mock_file.write(mox.Func(functools.partial(self.assertJSONEquals,
- expected_json)))
- mock_file.close()
-
- self.mox.ReplayAll()
-
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
-
-class BareMetalDomTestCase(test.TestCase):
-
- def setUp(self):
- super(BareMetalDomTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- # Stub out utils.execute
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def tearDown(self):
- super(BareMetalDomTestCase, self).tearDown()
-
- # Reset the singleton state
- dom.BareMetalDom._instance = None
- dom.BareMetalDom._is_init = False
-
- def test_read_domain_only_once(self):
- """Confirm that the domain is read from a file only once,
- even if the object is instantiated multiple times"""
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Instantiate multiple instances
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
-
- def test_init_no_domains(self):
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expectd values
- self.assertEqual(bmdom.fake_dom_nums, 0)
-
- def test_init_remove_non_running_domain(self):
- """Check to see that all entries in the domain list are removed
- except for the one that is in the running state"""
-
- domains = [dict(node_id=1, name='i-00000001',
- status=power_state.NOSTATE),
- dict(node_id=2, name='i-00000002', status=power_state.RUNNING),
- dict(node_id=3, name='i-00000003', status=power_state.PAUSED),
- dict(node_id=5, name='i-00000004', status=power_state.SHUTDOWN),
- dict(node_id=7, name='i-00000005', status=power_state.CRASHED),
- dict(node_id=8, name='i-00000006', status=power_state.SUSPENDED),
- dict(node_id=9, name='i-00000007', status=power_state.NOSTATE)]
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn(domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- self.assertEqual(bmdom.domains, [{'node_id': 2,
- 'name': 'i-00000002',
- 'status': power_state.RUNNING}])
- self.assertEqual(bmdom.fake_dom_nums, 1)
-
- def test_find_domain(self):
- domain = {'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expected values
- self.assertEquals(bmdom.find_domain('instance-00000001'), domain)
-
-
-class BareMetalTestCase(test.TestCase):
-
- test_ip = '10.11.12.13'
- test_instance = {'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'mac_address': '02:12:34:46:56:67',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '123456',
- 'instance_type_id': '5'} # m1.small
-
- def setUp(self):
- super(BareMetalTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def test_get_info(self):
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- conn = driver.BareMetalDriver(None, True)
- # TODO(mikalstill): this is not a very good fake instance
- info = conn.get_info({'name': 'instance-00000001'})
-
- # Expected values
- self.assertEquals(info['mem'], 16777216)
- self.assertEquals(info['state'], 1)
- self.assertEquals(info['num_cpu'], 1)
- self.assertEquals(info['cpu_time'], 100)
- self.assertEquals(info['max_mem'], 16777216)
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
new file mode 100644
index 000000000..45c9ede43
--- /dev/null
+++ b/nova/tests/baremetal/test_pxe.py
@@ -0,0 +1,532 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for baremetal pxe driver."""
+
+import os
+
+import mox
+from testtools.matchers import Contains
+from testtools.matchers import MatchesAll
+from testtools.matchers import Not
+from testtools.matchers import StartsWith
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova.tests.image import fake as fake_image
+from nova.tests import utils
+from nova.virt.baremetal import db
+from nova.virt.baremetal import pxe
+from nova.virt.baremetal import utils as bm_utils
+from nova.virt.disk import api as disk_api
+
+CONF = cfg.CONF
+
+COMMON_FLAGS = dict(
+ firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
+ host='test_host',
+)
+
+BAREMETAL_FLAGS = dict(
+ driver='nova.virt.baremetal.pxe.PXE',
+ instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
+ power_manager='nova.virt.baremetal.fake.FakePowerManager',
+ vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
+ volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
+ group='baremetal',
+)
+
+
+class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
+
+ def setUp(self):
+ super(BareMetalPXETestCase, self).setUp()
+ self.flags(**COMMON_FLAGS)
+ self.flags(**BAREMETAL_FLAGS)
+ self.driver = pxe.PXE()
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.addCleanup(fake_image.FakeImageService_reset)
+ self.context = utils.get_test_admin_context()
+ self.test_block_device_info = None,
+ self.instance = utils.get_test_instance()
+ self.test_network_info = utils.get_test_network_info(),
+ self.node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ prov_mac_address='11:11:11:11:11:11',
+ )
+ self.nic_info = [
+ {'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+
+ def _create_node(self):
+ self.node = db.bm_node_create(self.context, self.node_info)
+ for nic in self.nic_info:
+ db.bm_interface_create(
+ self.context,
+ self.node['id'],
+ nic['address'],
+ nic['datapath_id'],
+ nic['port_no'],
+ )
+ self.instance['node'] = self.node['id']
+ self.spawn_params = dict(
+ admin_password='test_pass',
+ block_device_info=self.test_block_device_info,
+ context=self.context,
+ image_meta=utils.get_test_image_info(None,
+ self.instance),
+ injected_files=[('/fake/path', 'hello world')],
+ instance=self.instance,
+ network_info=self.test_network_info,
+ )
+
+
+class PXEClassMethodsTestCase(BareMetalPXETestCase):
+
+ def test_build_pxe_config(self):
+ args = {
+ 'deployment_id': 'aaa',
+ 'deployment_key': 'bbb',
+ 'deployment_iscsi_iqn': 'ccc',
+ 'deployment_aki_path': 'ddd',
+ 'deployment_ari_path': 'eee',
+ 'aki_path': 'fff',
+ 'ari_path': 'ggg',
+ }
+ config = pxe.build_pxe_config(**args)
+ self.assertThat(config, StartsWith('default deploy'))
+
+ # deploy bits are in the deploy section
+ start = config.index('label deploy')
+ end = config.index('label boot')
+ self.assertThat(config[start:end], MatchesAll(
+ Contains('kernel ddd'),
+ Contains('initrd=eee'),
+ Contains('deployment_id=aaa'),
+ Contains('deployment_key=bbb'),
+ Contains('iscsi_target_iqn=ccc'),
+ Not(Contains('kernel fff')),
+ ))
+
+ # boot bits are in the boot section
+ start = config.index('label boot')
+ self.assertThat(config[start:], MatchesAll(
+ Contains('kernel fff'),
+ Contains('initrd=ggg'),
+ Not(Contains('kernel ddd')),
+ ))
+
+ def test_build_network_config(self):
+ net = utils.get_test_network_info(1)
+ config = pxe.build_network_config(net)
+ self.assertIn('eth0', config)
+ self.assertNotIn('eth1', config)
+ self.assertIn('hwaddress ether fake', config)
+ self.assertNotIn('hwaddress ether aa:bb:cc:dd', config)
+
+ net[0][1]['mac'] = 'aa:bb:cc:dd'
+ config = pxe.build_network_config(net)
+ self.assertIn('hwaddress ether aa:bb:cc:dd', config)
+
+ net = utils.get_test_network_info(2)
+ config = pxe.build_network_config(net)
+ self.assertIn('eth0', config)
+ self.assertIn('eth1', config)
+
+ def test_build_network_config_dhcp(self):
+ self.flags(
+ net_config_template='$pybasedir/nova/virt/baremetal/'
+ 'net-dhcp.ubuntu.template',
+ group='baremetal',
+ )
+ net = utils.get_test_network_info()
+ net[0][1]['ips'][0]['ip'] = '1.2.3.4'
+ config = pxe.build_network_config(net)
+ self.assertIn('iface eth0 inet dhcp', config)
+ self.assertNotIn('address 1.2.3.4', config)
+
+ def test_build_network_config_static(self):
+ self.flags(
+ net_config_template='$pybasedir/nova/virt/baremetal/'
+ 'net-static.ubuntu.template',
+ group='baremetal',
+ )
+ net = utils.get_test_network_info()
+ net[0][1]['ips'][0]['ip'] = '1.2.3.4'
+ config = pxe.build_network_config(net)
+ self.assertIn('iface eth0 inet static', config)
+ self.assertIn('address 1.2.3.4', config)
+
+ def test_image_dir_path(self):
+ self.assertEqual(
+ pxe.get_image_dir_path(self.instance),
+ os.path.join(CONF.instances_path, 'instance-00000001'))
+
+ def test_image_file_path(self):
+ self.assertEqual(
+ pxe.get_image_file_path(self.instance),
+ os.path.join(
+ CONF.instances_path, 'instance-00000001', 'disk'))
+
+ def test_pxe_config_file_path(self):
+ self.instance['uuid'] = 'aaaa-bbbb-cccc'
+ self.assertEqual(
+ pxe.get_pxe_config_file_path(self.instance),
+ os.path.join(CONF.baremetal.tftp_root,
+ 'aaaa-bbbb-cccc', 'config'))
+
+ def test_pxe_mac_path(self):
+ self.assertEqual(
+ pxe.get_pxe_mac_path('23:45:67:89:AB'),
+ os.path.join(CONF.baremetal.tftp_root,
+ 'pxelinux.cfg', '01-23-45-67-89-ab'))
+
+ def test_get_instance_deploy_ids(self):
+ self.instance['extra_specs'] = {
+ 'deploy_kernel_id': 'aaaa',
+ 'deploy_ramdisk_id': 'bbbb',
+ }
+ self.flags(deploy_kernel="fail", group='baremetal')
+ self.flags(deploy_ramdisk="fail", group='baremetal')
+
+ self.assertEqual(
+ pxe.get_deploy_aki_id(self.instance), 'aaaa')
+ self.assertEqual(
+ pxe.get_deploy_ari_id(self.instance), 'bbbb')
+
+ def test_get_default_deploy_ids(self):
+ self.instance['extra_specs'] = {}
+ self.flags(deploy_kernel="aaaa", group='baremetal')
+ self.flags(deploy_ramdisk="bbbb", group='baremetal')
+
+ self.assertEqual(
+ pxe.get_deploy_aki_id(self.instance), 'aaaa')
+ self.assertEqual(
+ pxe.get_deploy_ari_id(self.instance), 'bbbb')
+
+ def test_get_partition_sizes(self):
+ # m1.tiny: 10GB root, 0GB swap
+ self.instance['instance_type_id'] = 1
+ sizes = pxe.get_partition_sizes(self.instance)
+ self.assertEqual(sizes[0], 10240)
+ self.assertEqual(sizes[1], 1)
+
+ # kinda.big: 40GB root, 1GB swap
+ ref = utils.get_test_instance_type()
+ self.instance['instance_type_id'] = ref['id']
+ self.instance['root_gb'] = ref['root_gb']
+ sizes = pxe.get_partition_sizes(self.instance)
+ self.assertEqual(sizes[0], 40960)
+ self.assertEqual(sizes[1], 1024)
+
+ def test_get_tftp_image_info(self):
+ # Raises an exception when options are neither specified
+ # on the instance nor in configuration file
+ CONF.baremetal.deploy_kernel = None
+ CONF.baremetal.deploy_ramdisk = None
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
+ # Even if the instance includes kernel_id and ramdisk_id,
+ # we still need deploy_kernel_id and deploy_ramdisk_id.
+ # If those aren't present in instance[], and not specified in
+ # config file, then we raise an exception.
+ self.instance['kernel_id'] = 'aaaa'
+ self.instance['ramdisk_id'] = 'bbbb'
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
+ # If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
+ # but defaults are set in the config file, we should use those.
+
+ # Here, we confirm both that all four values were set
+ # and that the proper paths are getting set for all of them
+ CONF.baremetal.deploy_kernel = 'cccc'
+ CONF.baremetal.deploy_ramdisk = 'dddd'
+ base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
+ res = pxe.get_tftp_image_info(self.instance)
+ expected = {
+ 'kernel': ['aaaa', os.path.join(base, 'kernel')],
+ 'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
+ 'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
+ 'deploy_ramdisk': ['dddd',
+ os.path.join(base, 'deploy_ramdisk')],
+ }
+ self.assertEqual(res, expected)
+
+ # If deploy_kernel_id and deploy_ramdisk_id are specified on
+ # image extra_specs, this should override any default configuration.
+ # Note that it is passed on the 'instance' object, despite being
+ # inherited from the instance_types_extra_specs table.
+ extra_specs = {
+ 'deploy_kernel_id': 'eeee',
+ 'deploy_ramdisk_id': 'ffff',
+ }
+ self.instance['extra_specs'] = extra_specs
+ res = pxe.get_tftp_image_info(self.instance)
+ self.assertEqual(res['deploy_kernel'][0], 'eeee')
+ self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
+
+
+class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
+
+ def test_collect_mac_addresses(self):
+ self._create_node()
+ address_list = [nic['address'] for nic in self.nic_info]
+ address_list.append(self.node_info['prov_mac_address'])
+ address_list.sort()
+ macs = self.driver._collect_mac_addresses(self.context, self.node)
+ self.assertEqual(macs, address_list)
+
+ def test_generate_udev_rules(self):
+ self._create_node()
+ address_list = [nic['address'] for nic in self.nic_info]
+ address_list.append(self.node_info['prov_mac_address'])
+
+ rules = self.driver._generate_udev_rules(self.context, self.node)
+ for address in address_list:
+ self.assertIn('ATTR{address}=="%s"' % address, rules)
+
+ def test_cache_tftp_images(self):
+ self.instance['kernel_id'] = 'aaaa'
+ self.instance['ramdisk_id'] = 'bbbb'
+ extra_specs = {
+ 'deploy_kernel_id': 'cccc',
+ 'deploy_ramdisk_id': 'dddd',
+ }
+ self.instance['extra_specs'] = extra_specs
+ image_info = pxe.get_tftp_image_info(self.instance)
+
+ self.mox.StubOutWithMock(os, 'makedirs')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.makedirs(os.path.join(CONF.baremetal.tftp_root,
+ self.instance['uuid'])).AndReturn(True)
+ for uuid, path in [image_info[label] for label in image_info]:
+ os.path.exists(path).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.driver._cache_tftp_images(
+ self.context, self.instance, image_info)
+ self.mox.VerifyAll()
+
+ def test_cache_image(self):
+ self.mox.StubOutWithMock(os, 'makedirs')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.makedirs(pxe.get_image_dir_path(self.instance)).\
+ AndReturn(True)
+ os.path.exists(pxe.get_image_file_path(self.instance)).\
+ AndReturn(True)
+ self.mox.ReplayAll()
+
+ image_meta = utils.get_test_image_info(
+ self.context, self.instance)
+ self.driver._cache_image(
+ self.context, self.instance, image_meta)
+ self.mox.VerifyAll()
+
+ def test_inject_into_image(self):
+ # NOTE(deva): we could also test this method by stubbing
+ # nova.virt.disk.api._inject_*_into_fs
+ self._create_node()
+ files = []
+ files.append(('/etc/udev/rules.d/70-persistent-net.rules',
+ self.driver._generate_udev_rules(self.context, self.node)))
+ self.instance['hostname'] = 'fake hostname'
+ files.append(('/etc/hostname', 'fake hostname'))
+ self.instance['key_data'] = 'fake ssh key'
+ net_info = utils.get_test_network_info(1)
+ net = pxe.build_network_config(net_info)
+ admin_password = 'fake password'
+
+ self.mox.StubOutWithMock(disk_api, 'inject_data')
+ disk_api.inject_data(
+ admin_password=admin_password,
+ image=pxe.get_image_file_path(self.instance),
+ key='fake ssh key',
+ metadata=None,
+ partition=None,
+ net=net,
+ files=files, # this is what we're really testing
+ ).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.driver._inject_into_image(
+ self.context, self.node, self.instance,
+ network_info=net_info,
+ admin_password=admin_password,
+ injected_files=None)
+ self.mox.VerifyAll()
+
+
+class PXEPublicMethodsTestCase(BareMetalPXETestCase):
+
+ def test_cache_images(self):
+ self._create_node()
+ self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
+ self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
+ self.mox.StubOutWithMock(self.driver, "_cache_image")
+ self.mox.StubOutWithMock(self.driver, "_inject_into_image")
+
+ pxe.get_tftp_image_info(self.instance).AndReturn([])
+ self.driver._cache_tftp_images(self.context, self.instance, [])
+ self.driver._cache_image(self.context, self.instance, [])
+ self.driver._inject_into_image(self.context, self.node, self.instance,
+ self.test_network_info, None, '')
+ self.mox.ReplayAll()
+
+ self.driver.cache_images(
+ self.context, self.node, self.instance,
+ admin_password='',
+ image_meta=[],
+ injected_files=None,
+ network_info=self.test_network_info,
+ )
+ self.mox.VerifyAll()
+
+ def test_destroy_images(self):
+ self._create_node()
+ self.mox.StubOutWithMock(os, 'unlink')
+
+ os.unlink(pxe.get_image_file_path(self.instance))
+ os.unlink(pxe.get_image_dir_path(self.instance))
+ self.mox.ReplayAll()
+
+ self.driver.destroy_images(self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_activate_bootloader(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ iqn = "iqn-%s" % self.instance['uuid']
+ pxe_config = 'this is a fake pxe config'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+ image_path = pxe.get_image_file_path(self.instance)
+
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
+ self.mox.StubOutWithMock(bm_utils, 'random_alnum')
+ self.mox.StubOutWithMock(db, 'bm_deployment_create')
+ self.mox.StubOutWithMock(pxe, 'build_pxe_config')
+ self.mox.StubOutWithMock(bm_utils, 'write_to_file')
+ self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
+
+ pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
+ pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
+ bm_utils.random_alnum(32).AndReturn('alnum')
+ db.bm_deployment_create(
+ self.context, 'alnum', image_path, pxe_path, 0, 0).\
+ AndReturn(1234)
+ pxe.build_pxe_config(
+ 1234, 'alnum', iqn, 'aaaa', 'bbbb', 'cccc', 'dddd').\
+ AndReturn(pxe_config)
+ bm_utils.write_to_file(pxe_path, pxe_config)
+ for mac in macs:
+ bm_utils.create_link_without_raise(
+ pxe_path, pxe.get_pxe_mac_path(mac))
+ self.mox.ReplayAll()
+
+ self.driver.activate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_deactivate_bootloader(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
+
+ pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
+ for uuid, path in [image_info[label] for label in image_info]:
+ bm_utils.unlink_without_raise(path)
+ bm_utils.unlink_without_raise(pxe_path)
+ self.driver._collect_mac_addresses(self.context, self.node).\
+ AndReturn(macs)
+ for mac in macs:
+ bm_utils.unlink_without_raise(pxe.get_pxe_mac_path(mac))
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+ self.mox.ReplayAll()
+
+ self.driver.deactivate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
+
+ def test_deactivate_bootloader_for_nonexistent_instance(self):
+ self._create_node()
+ macs = [nic['address'] for nic in self.nic_info]
+ macs.append(self.node_info['prov_mac_address'])
+ macs.sort()
+ image_info = {
+ 'deploy_kernel': [None, 'aaaa'],
+ 'deploy_ramdisk': [None, 'bbbb'],
+ 'kernel': [None, 'cccc'],
+ 'ramdisk': [None, 'dddd'],
+ }
+ self.instance['uuid'] = 'fake-uuid'
+ pxe_path = pxe.get_pxe_config_file_path(self.instance)
+
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
+ self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
+
+ pxe.get_tftp_image_info(self.instance).\
+ AndRaise(exception.NovaException)
+ bm_utils.unlink_without_raise(pxe_path)
+ self.driver._collect_mac_addresses(self.context, self.node).\
+ AndRaise(exception.DBError)
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+ self.mox.ReplayAll()
+
+ self.driver.deactivate_bootloader(
+ self.context, self.node, self.instance)
+ self.mox.VerifyAll()
diff --git a/nova/tests/baremetal/test_tilera.py b/nova/tests/baremetal/test_tilera.py
deleted file mode 100644
index b86e6c9c1..000000000
--- a/nova/tests/baremetal/test_tilera.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import __builtin__
-import StringIO
-
-from nova import test
-from nova.virt.baremetal import tilera
-
-
-class TileraBareMetalNodesTestCase(test.TestCase):
-
- def setUp(self):
- super(TileraBareMetalNodesTestCase, self).setUp()
- self.board_info = "\n".join([
-'# board_id ip_address mac_address 00:1A:CA:00:57:90 '
-'00:1A:CA:00:58:98 00:1A:CA:00:58:50',
-'6 10.0.2.7 00:1A:CA:00:58:5C 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'7 10.0.2.8 00:1A:CA:00:58:A4 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'8 10.0.2.9 00:1A:CA:00:58:1A 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'9 10.0.2.10 00:1A:CA:00:58:38 10 16385 1000 0 0 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}'])
-
- def tearDown(self):
- super(TileraBareMetalNodesTestCase, self).tearDown()
-
- # Reset the singleton state
- tilera.BareMetalNodes._instance = None
- tilera.BareMetalNodes._is_init = False
-
- def test_singleton(self):
- """Confirm that the object acts like a singleton.
-
- In this case, we check that it only loads the config file once,
- even though it has been instantiated multiple times"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
-
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
-
- def test_get_hw_info(self):
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
- nodes = tilera.BareMetalNodes()
- self.assertEqual(nodes.get_hw_info('vcpus'), 10)
diff --git a/nova/virt/vif.py b/nova/tests/baremetal/test_utils.py
index 69cfd996c..827b1fcaf 100644
--- a/nova/virt/vif.py
+++ b/nova/tests/baremetal/test_utils.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
-# Copyright (C) 2011 Midokura KK
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,20 +16,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""VIF module common to all virt layers."""
+"""Tests for baremetal utils."""
+import mox
-class VIFDriver(object):
- """Abstract class that defines generic interfaces for all VIF drivers."""
- def __init__(self, **kwargs):
- # NOTE(jkoelker) __init__ is here so subclasses *could* take
- # advantage of any kwargs should they need to
- pass
+from nova import exception
+from nova import test
+from nova.virt.baremetal import utils
- def plug(self, instance, vif, **kwargs):
- """Plug VIF into network."""
- raise NotImplementedError()
- def unplug(self, instance, vif, **kwargs):
- """Unplug VIF from network."""
- raise NotImplementedError()
+class BareMetalUtilsTestCase(test.TestCase):
+
+ def test_random_alnum(self):
+ s = utils.random_alnum(10)
+ self.assertEqual(len(s), 10)
+ s = utils.random_alnum(100)
+ self.assertEqual(len(s), 100)
diff --git a/nova/tests/baremetal/test_volume_driver.py b/nova/tests/baremetal/test_volume_driver.py
new file mode 100644
index 000000000..c83277516
--- /dev/null
+++ b/nova/tests/baremetal/test_volume_driver.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for baremetal volume driver."""
+
+from nova.openstack.common import cfg
+from nova import test
+
+from nova.virt.baremetal import volume_driver
+
+CONF = cfg.CONF
+
+SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001
+ System information:
+ Driver: iscsi
+ State: ready
+ I_T nexus information:
+ I_T nexus: 8
+ Initiator: iqn.1993-08.org.debian:01:7780c6a16b4
+ Connection: 0
+ IP Address: 172.17.12.10
+ LUN information:
+ LUN: 0
+ Type: controller
+ SCSI ID: IET 00010000
+ SCSI SN: beaf10
+ Size: 0 MB, Block size: 1
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: null
+ Backing store path: None
+ Backing store flags:
+ LUN: 1
+ Type: disk
+ SCSI ID: IET 00010001
+ SCSI SN: beaf11
+ Size: 1074 MB, Block size: 512
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: rdwr
+ Backing store path: /dev/nova-volumes/volume-00000001
+ Backing store flags:
+ Account information:
+ ACL information:
+ ALL
+Target 2: iqn.2010-10.org.openstack:volume-00000002
+ System information:
+ Driver: iscsi
+ State: ready
+ I_T nexus information:
+ LUN information:
+ LUN: 0
+ Type: controller
+ SCSI ID: IET 00020000
+ SCSI SN: beaf20
+ Size: 0 MB, Block size: 1
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: null
+ Backing store path: None
+ Backing store flags:
+ LUN: 1
+ Type: disk
+ SCSI ID: IET 00020001
+ SCSI SN: beaf21
+ Size: 2147 MB, Block size: 512
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: rdwr
+ Backing store path: /dev/nova-volumes/volume-00000002
+ Backing store flags:
+ Account information:
+ ACL information:
+ ALL
+Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc
+ System information:
+ Driver: iscsi
+ State: ready
+ I_T nexus information:
+ LUN information:
+ LUN: 0
+ Type: controller
+ SCSI ID: IET f42410000
+ SCSI SN: beaf10000010
+ Size: 0 MB, Block size: 1
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: null
+ Backing store path: None
+ Backing store flags:
+ LUN: 1
+ Type: disk
+ SCSI ID: IET f42410001
+ SCSI SN: beaf10000011
+ Size: 1074 MB, Block size: 512
+ Online: Yes
+ Removable media: No
+ Readonly: No
+ Backing store type: rdwr
+ Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
+iqn.2010-10.org.openstack:volume-00000001-lun-1
+ Backing store flags:
+ Account information:
+ ACL information:
+ ALL
+"""
+
+
+def fake_show_tgtadm():
+ return SHOW_OUTPUT
+
+
+class BareMetalVolumeTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BareMetalVolumeTestCase, self).setUp()
+ self.stubs.Set(volume_driver, '_show_tgtadm', fake_show_tgtadm)
+
+ def test_list_backingstore_path(self):
+ l = volume_driver._list_backingstore_path()
+ self.assertEqual(len(l), 3)
+ self.assertIn('/dev/nova-volumes/volume-00000001', l)
+ self.assertIn('/dev/nova-volumes/volume-00000002', l)
+ self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-'
+ 'iqn.2010-10.org.openstack:volume-00000001-lun-1', l)
+
+ def test_get_next_tid(self):
+ tid = volume_driver._get_next_tid()
+ self.assertEqual(1000002, tid)
+
+ def test_find_tid_found(self):
+ tid = volume_driver._find_tid(
+ 'iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc')
+ self.assertEqual(1000001, tid)
+
+ def test_find_tid_not_found(self):
+ tid = volume_driver._find_tid(
+ 'iqn.2010-10.org.openstack.baremetal:1000002-dev.vdc')
+ self.assertTrue(tid is None)
+
+ def test_get_iqn(self):
+ self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal')
+ iqn = volume_driver._get_iqn('instname', '/dev/vdx')
+ self.assertEquals('iqn.2012-12.a.b:instname-dev-vdx', iqn)
diff --git a/nova/tests/declare_flags.py b/nova/tests/cells/__init__.py
index 9e8fee123..d1bf725f7 100644
--- a/nova/tests/declare_flags.py
+++ b/nova/tests/cells/__init__.py
@@ -1,7 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,8 +15,5 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
-from nova.openstack.common import cfg
-
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.IntOpt('answer', default=42, help='test flag'))
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/cells/fakes.py b/nova/tests/cells/fakes.py
new file mode 100644
index 000000000..e996cbe13
--- /dev/null
+++ b/nova/tests/cells/fakes.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Cells tests.
+"""
+
+from nova.cells import driver
+from nova.cells import manager as cells_manager
+from nova.cells import messaging
+from nova.cells import state as cells_state
+import nova.db
+from nova.db import base
+from nova import exception
+from nova.openstack.common import cfg
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+
+
+# Fake Cell Hierarchy
+FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
+FAKE_CELL_LAYOUT = [{'child-cell1': []},
+ {'child-cell2': [{'grandchild-cell1': []}]},
+ {'child-cell3': [{'grandchild-cell2': []},
+ {'grandchild-cell3': []}]},
+ {'child-cell4': []}]
+
+# build_cell_stub_infos() below will take the above layout and create
+# a fake view of the DB from the perspective of each of the cells.
+# For each cell, a CellStubInfo will be created with this info.
+CELL_NAME_TO_STUB_INFO = {}
+
+
+class FakeDBApi(object):
+ """Cells uses a different DB in each cell. This means in order to
+ stub out things differently per cell, I need to create a fake DBApi
+ object that is instantiated by each fake cell.
+ """
+ def __init__(self, cell_db_entries):
+ self.cell_db_entries = cell_db_entries
+
+ def __getattr__(self, key):
+ return getattr(nova.db, key)
+
+ def cell_get_all(self, ctxt):
+ return self.cell_db_entries
+
+ def compute_node_get_all(self, ctxt):
+ return []
+
+ def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
+ return []
+
+ def instance_get_by_uuid(self, ctxt, instance_uuid):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+class FakeCellsDriver(driver.BaseCellsDriver):
+ pass
+
+
+class FakeCellState(cells_state.CellState):
+ def send_message(self, message):
+ message_runner = get_message_runner(self.name)
+ orig_ctxt = message.ctxt
+ json_message = message.to_json()
+ message = message_runner.message_from_json(json_message)
+ # Restore this so we can use mox and verify same context
+ message.ctxt = orig_ctxt
+ message.process()
+
+
+class FakeCellStateManager(cells_state.CellStateManager):
+ def __init__(self, *args, **kwargs):
+ super(FakeCellStateManager, self).__init__(*args,
+ cell_state_cls=FakeCellState, **kwargs)
+
+
+class FakeCellsManager(cells_manager.CellsManager):
+ def __init__(self, *args, **kwargs):
+ super(FakeCellsManager, self).__init__(*args,
+ cell_state_manager=FakeCellStateManager,
+ **kwargs)
+
+
+class CellStubInfo(object):
+ def __init__(self, test_case, cell_name, db_entries):
+ self.test_case = test_case
+ self.cell_name = cell_name
+ self.db_entries = db_entries
+
+ def fake_base_init(_self, *args, **kwargs):
+ _self.db = FakeDBApi(db_entries)
+
+ test_case.stubs.Set(base.Base, '__init__', fake_base_init)
+ self.cells_manager = FakeCellsManager()
+ # Fix the cell name, as it normally uses CONF.cells.name
+ msg_runner = self.cells_manager.msg_runner
+ msg_runner.our_name = self.cell_name
+ self.cells_manager.state_manager.my_cell_state.name = self.cell_name
+
+
+def _build_cell_stub_info(test_case, our_name, parent_path, children):
+ cell_db_entries = []
+ cur_db_id = 1
+ sep_char = messaging._PATH_CELL_SEP
+ if parent_path:
+ cell_db_entries.append(
+ dict(id=cur_db_id,
+ name=parent_path.split(sep_char)[-1],
+ is_parent=True,
+ username='username%s' % cur_db_id,
+ password='password%s' % cur_db_id,
+ rpc_host='rpc_host%s' % cur_db_id,
+ rpc_port='rpc_port%s' % cur_db_id,
+ rpc_virtual_host='rpc_vhost%s' % cur_db_id))
+ cur_db_id += 1
+ our_path = parent_path + sep_char + our_name
+ else:
+ our_path = our_name
+ for child in children:
+ for child_name, grandchildren in child.items():
+ _build_cell_stub_info(test_case, child_name, our_path,
+ grandchildren)
+ cell_entry = dict(id=cur_db_id,
+ name=child_name,
+ username='username%s' % cur_db_id,
+ password='password%s' % cur_db_id,
+ rpc_host='rpc_host%s' % cur_db_id,
+ rpc_port='rpc_port%s' % cur_db_id,
+ rpc_virtual_host='rpc_vhost%s' % cur_db_id,
+ is_parent=False)
+ cell_db_entries.append(cell_entry)
+ cur_db_id += 1
+ stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
+ CELL_NAME_TO_STUB_INFO[our_name] = stub_info
+
+
+def _build_cell_stub_infos(test_case):
+ _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
+ FAKE_CELL_LAYOUT)
+
+
+def init(test_case):
+ global CELL_NAME_TO_STUB_INFO
+ test_case.flags(driver='nova.tests.cells.fakes.FakeCellsDriver',
+ group='cells')
+ CELL_NAME_TO_STUB_INFO = {}
+ _build_cell_stub_infos(test_case)
+
+
+def _get_cell_stub_info(cell_name):
+ return CELL_NAME_TO_STUB_INFO[cell_name]
+
+
+def get_state_manager(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager.state_manager
+
+
+def get_cell_state(cur_cell_name, tgt_cell_name):
+ state_manager = get_state_manager(cur_cell_name)
+ cell = state_manager.child_cells.get(tgt_cell_name)
+ if cell is None:
+ cell = state_manager.parent_cells.get(tgt_cell_name)
+ return cell
+
+
+def get_cells_manager(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager
+
+
+def get_message_runner(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager.msg_runner
+
+
+def stub_tgt_method(test_case, cell_name, method_name, method):
+ msg_runner = get_message_runner(cell_name)
+ tgt_msg_methods = msg_runner.methods_by_type['targeted']
+ setattr(tgt_msg_methods, method_name, method)
+
+
+def stub_bcast_method(test_case, cell_name, method_name, method):
+ msg_runner = get_message_runner(cell_name)
+ tgt_msg_methods = msg_runner.methods_by_type['broadcast']
+ setattr(tgt_msg_methods, method_name, method)
+
+
+def stub_bcast_methods(test_case, method_name, method):
+ for cell_name in CELL_NAME_TO_STUB_INFO.keys():
+ stub_bcast_method(test_case, cell_name, method_name, method)
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
new file mode 100644
index 000000000..ef165f4ed
--- /dev/null
+++ b/nova/tests/cells/test_cells_manager.py
@@ -0,0 +1,239 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsManager
+"""
+import datetime
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova import context
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.cells import fakes
+
+
+class CellsManagerClassTestCase(test.TestCase):
+ """Test case for CellsManager class."""
+
+ def setUp(self):
+ super(CellsManagerClassTestCase, self).setUp()
+ fakes.init(self)
+ # pick a child cell to use for tests.
+ self.our_cell = 'grandchild-cell1'
+ self.cells_manager = fakes.get_cells_manager(self.our_cell)
+ self.msg_runner = self.cells_manager.msg_runner
+ self.driver = self.cells_manager.driver
+ self.ctxt = 'fake_context'
+
+ def _get_fake_responses(self):
+ responses = []
+ expected_responses = []
+ for x in xrange(1, 4):
+ responses.append(messaging.Response('cell%s' % x, x, False))
+ expected_responses.append(('cell%s' % x, x))
+ return expected_responses, responses
+
+ def test_get_cell_info_for_neighbors(self):
+ self.mox.StubOutWithMock(self.cells_manager.state_manager,
+ 'get_cell_info_for_neighbors')
+ self.cells_manager.state_manager.get_cell_info_for_neighbors()
+ self.mox.ReplayAll()
+ self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
+
+ def test_post_start_hook_child_cell(self):
+ self.mox.StubOutWithMock(self.driver, 'start_consumers')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
+
+ self.driver.start_consumers(self.msg_runner)
+ context.get_admin_context().AndReturn(self.ctxt)
+ self.cells_manager._update_our_parents(self.ctxt)
+ self.mox.ReplayAll()
+ self.cells_manager.post_start_hook()
+
+ def test_post_start_hook_middle_cell(self):
+ cells_manager = fakes.get_cells_manager('child-cell2')
+ msg_runner = cells_manager.msg_runner
+ driver = cells_manager.driver
+
+ self.mox.StubOutWithMock(driver, 'start_consumers')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(msg_runner,
+ 'ask_children_for_capabilities')
+ self.mox.StubOutWithMock(msg_runner,
+ 'ask_children_for_capacities')
+
+ driver.start_consumers(msg_runner)
+ context.get_admin_context().AndReturn(self.ctxt)
+ msg_runner.ask_children_for_capabilities(self.ctxt)
+ msg_runner.ask_children_for_capacities(self.ctxt)
+ self.mox.ReplayAll()
+ cells_manager.post_start_hook()
+
+ def test_update_our_parents(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'tell_parents_our_capabilities')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'tell_parents_our_capacities')
+
+ self.msg_runner.tell_parents_our_capabilities(self.ctxt)
+ self.msg_runner.tell_parents_our_capacities(self.ctxt)
+ self.mox.ReplayAll()
+ self.cells_manager._update_our_parents(self.ctxt)
+
+ def test_schedule_run_instance(self):
+ host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed'
+ self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance')
+ our_cell = self.msg_runner.state_manager.get_my_state()
+ self.msg_runner.schedule_run_instance(self.ctxt, our_cell,
+ host_sched_kwargs)
+ self.mox.ReplayAll()
+ self.cells_manager.schedule_run_instance(self.ctxt,
+ host_sched_kwargs=host_sched_kwargs)
+
+ def test_run_compute_api_method(self):
+ # Args should just be silently passed through
+ cell_name = 'fake-cell-name'
+ method_info = 'fake-method-info'
+
+ fake_response = messaging.Response('fake', 'fake', False)
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'run_compute_api_method')
+ self.mox.StubOutWithMock(fake_response,
+ 'value_or_raise')
+ self.msg_runner.run_compute_api_method(self.ctxt,
+ cell_name,
+ method_info,
+ True).AndReturn(fake_response)
+ fake_response.value_or_raise().AndReturn('fake-response')
+ self.mox.ReplayAll()
+ response = self.cells_manager.run_compute_api_method(
+ self.ctxt, cell_name=cell_name, method_info=method_info,
+ call=True)
+ self.assertEqual('fake-response', response)
+
+ def test_instance_update_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
+ self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_update_at_top(self.ctxt,
+ instance='fake-instance')
+
+ def test_instance_destroy_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
+ self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_destroy_at_top(self.ctxt,
+ instance='fake-instance')
+
+ def test_instance_delete_everywhere(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_delete_everywhere')
+ self.msg_runner.instance_delete_everywhere(self.ctxt,
+ 'fake-instance',
+ 'fake-type')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_delete_everywhere(
+ self.ctxt, instance='fake-instance',
+ delete_type='fake-type')
+
+ def test_instance_fault_create_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_fault_create_at_top')
+ self.msg_runner.instance_fault_create_at_top(self.ctxt,
+ 'fake-fault')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_fault_create_at_top(
+ self.ctxt, instance_fault='fake-fault')
+
+ def test_bw_usage_update_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'bw_usage_update_at_top')
+ self.msg_runner.bw_usage_update_at_top(self.ctxt,
+ 'fake-bw-info')
+ self.mox.ReplayAll()
+ self.cells_manager.bw_usage_update_at_top(
+ self.ctxt, bw_update_info='fake-bw-info')
+
+ def test_heal_instances(self):
+ self.flags(instance_updated_at_threshold=1000,
+ instance_update_num_instances=2,
+ group='cells')
+
+ fake_context = context.RequestContext('fake', 'fake')
+ stalled_time = timeutils.utcnow()
+ updated_since = stalled_time - datetime.timedelta(seconds=1000)
+
+ def utcnow():
+ return stalled_time
+
+ call_info = {'get_instances': 0, 'sync_instances': []}
+
+ instances = ['instance1', 'instance2', 'instance3']
+
+ def get_instances_to_sync(context, **kwargs):
+ self.assertEqual(context, fake_context)
+ call_info['shuffle'] = kwargs.get('shuffle')
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+ call_info['get_instances'] += 1
+ return iter(instances)
+
+ def instance_get_by_uuid(context, uuid):
+ return instances[int(uuid[-1]) - 1]
+
+ def sync_instance(context, instance):
+ self.assertEqual(context, fake_context)
+ call_info['sync_instances'].append(instance)
+
+ self.stubs.Set(cells_utils, 'get_instances_to_sync',
+ get_instances_to_sync)
+ self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
+ instance_get_by_uuid)
+ self.stubs.Set(self.cells_manager, '_sync_instance',
+ sync_instance)
+ self.stubs.Set(timeutils, 'utcnow', utcnow)
+
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 1)
+ # Only first 2
+ self.assertEqual(call_info['sync_instances'],
+ instances[:2])
+
+ call_info['sync_instances'] = []
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertEqual(call_info['project_id'], None)
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 2)
+ # Now the last 1 and the first 1
+ self.assertEqual(call_info['sync_instances'],
+ [instances[-1], instances[0]])
+
+ def test_sync_instances(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'sync_instances')
+ self.msg_runner.sync_instances(self.ctxt, 'fake-project',
+ 'fake-time', 'fake-deleted')
+ self.mox.ReplayAll()
+ self.cells_manager.sync_instances(self.ctxt,
+ project_id='fake-project',
+ updated_since='fake-time',
+ deleted='fake-deleted')
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
new file mode 100644
index 000000000..da45721ed
--- /dev/null
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -0,0 +1,960 @@
+# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Messaging module
+"""
+import mox
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.cells import fakes
+
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+CONF.import_opt('allowed_rpc_exception_modules',
+ 'nova.openstack.common.rpc')
+
+
+class CellsMessageClassesTestCase(test.TestCase):
+ """Test case for the main Cells Message classes."""
+ def setUp(self):
+ super(CellsMessageClassesTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ # Need to be able to deserialize test.TestingException.
+ allowed_modules = CONF.allowed_rpc_exception_modules
+ allowed_modules.append('nova.test')
+ self.flags(allowed_rpc_exception_modules=allowed_modules)
+ self.our_name = 'api-cell'
+ self.msg_runner = fakes.get_message_runner(self.our_name)
+ self.state_manager = self.msg_runner.state_manager
+
+ def test_reverse_path(self):
+ path = 'a!b!c!d'
+ expected = 'd!c!b!a'
+ rev_path = messaging._reverse_path(path)
+ self.assertEqual(rev_path, expected)
+
+ def test_response_cell_name_from_path(self):
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2!cell1')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input))
+
+ def test_response_cell_name_from_path_neighbor_only(self):
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input,
+ neighbor_only=True))
+
+ def test_targeted_message(self):
+ self.flags(max_hop_count=99, group='cells')
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+ self.assertEqual(target_cell, target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertFalse(tgt_message.need_response)
+ self.assertEqual(self.our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ child_cell = self.state_manager.get_child_cell('child-cell2')
+ self.assertEqual(child_cell, next_hop)
+
+ def test_create_targeted_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ target_cell = 'child-cell1!api-cell'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ tgt_message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+ self.assertEqual(target_cell, target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertTrue(tgt_message.need_response)
+ self.assertEqual(our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
+ self.assertEqual(parent_cell, next_hop)
+
+ def test_targeted_message_when_target_is_cell_state(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_child_cell('child-cell2')
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_targeted_message_when_target_cell_state_is_me(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_my_state()
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_create_broadcast_message(self):
+ self.flags(max_hop_count=99, group='cells')
+ self.flags(name='api-cell', max_hop_count=99, group='cells')
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertFalse(bcast_message.need_response)
+ self.assertEqual(self.our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ child_cells = self.state_manager.get_child_cells()
+ self.assertEqual(child_cells, next_hops)
+
+ def test_create_broadcast_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs, direction, need_response=True)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertTrue(bcast_message.need_response)
+ self.assertEqual(our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ parent_cells = msg_runner.state_manager.get_parent_cells()
+ self.assertEqual(parent_cells, next_hops)
+
+ def test_self_targeted_message(self):
+ target_cell = 'api-cell'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_child_targeted_message(self):
+ target_cell = 'api-cell!child-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_grandchild_targeted_message(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_grandchild_targeted_message_with_response(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+ return 'our_fake_response'
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+ self.assertFalse(response.failure)
+ self.assertTrue(response.value_or_raise(), 'our_fake_response')
+
+ def test_grandchild_targeted_message_with_error(self):
+ # An exception raised by the remote method should be captured in the
+ # response (failure=True) and re-raised by value_or_raise().
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('this should be returned')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_grandchild_targeted_message_max_hops(self):
+ # Reaching the grandchild needs more than 2 hops; the message must be
+ # dropped with CellMaxHopCountReached before the method is invoked.
+ self.flags(max_hop_count=2, group='cells')
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('should not be reached')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellMaxHopCountReached,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell(self):
+ # Routing to a non-existent leaf cell should produce a
+ # CellRoutingInconsistency failure response.
+ target_cell = 'api-cell!child-cell2!grandchild-cell4'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell2(self):
+ # Same failure mode when the *first* hop of the path is unknown.
+ target_cell = 'unknown-cell!child-cell2'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_broadcast_routing(self):
+ # A downward broadcast with run_locally=True should reach every cell
+ # in the fake topology, including the sending cell itself.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+ # fakes creates 8 cells (including ourself).
+ self.assertEqual(len(cells), 8)
+
+ def test_broadcast_routing_up(self):
+ # Broadcast upward from a grandchild: only the chain of ancestors
+ # (plus ourselves) should be visited.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ msg_runner = fakes.get_message_runner('grandchild-cell3')
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+ # Paths are reversed, since going 'up'
+ expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
+ 'grandchild-cell3!child-cell3!api-cell'])
+ self.assertEqual(expected, cells)
+
+ def test_broadcast_routing_without_ourselves(self):
+ # run_locally=False must skip the originating cell but still reach
+ # every other cell in the topology.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=False)
+ bcast_message.process()
+ # fakes creates 8 cells (including ourself). So we should see
+ # only 7 here.
+ self.assertEqual(len(cells), 7)
+
+ def test_broadcast_routing_with_response(self):
+ # need_response=True on a broadcast collects one response per cell;
+ # each response's value encodes the responding cell's routing path.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_response_max_hops(self):
+ # With max_hop_count=2 a downward broadcast can only reach ourselves
+ # plus our immediate children (5 cells in the fake topology).
+ self.flags(max_hop_count=2, group='cells')
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ # Should only get responses from our immediate children (and
+ # ourselves)
+ self.assertEqual(len(responses), 5)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_all_erroring(self):
+ # When every cell's handler raises, each collected response must be
+ # a failure that re-raises the original exception.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_broadcast_routing_with_two_erroring(self):
+ # Mixed outcome: two cells are stubbed to fail while the rest
+ # succeed; failures and successes should be reported independently.
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method_failing(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+ fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
+ our_fake_method_failing)
+ fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
+ our_fake_method_failing)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ failure_responses = [resp for resp in responses if resp.failure]
+ success_responses = [resp for resp in responses if not resp.failure]
+ self.assertEqual(len(failure_responses), 2)
+ self.assertEqual(len(success_responses), 6)
+
+ for response in success_responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ for response in failure_responses:
+ self.assertIn(response.cell_name, ['api-cell!child-cell2',
+ 'api-cell!child-cell3!grandchild-cell3'])
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+
+class CellsTargetedMethodsTestCase(test.TestCase):
+ """Test case for _TargetedMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+ def setUp(self):
+ super(CellsTargetedMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs('api-cell', 'api-cell!child-cell2')
+
+ def _setup_attrs(self, source_cell, target_cell):
+ # Cache source-cell and target-cell runners/state managers plus the
+ # target's 'targeted' message-methods class and its compute_api/db,
+ # so individual tests can stub them via mox.
+ self.tgt_cell_name = target_cell
+ self.src_msg_runner = fakes.get_message_runner(source_cell)
+ self.src_state_manager = self.src_msg_runner.state_manager
+ tgt_shortname = target_cell.split('!')[-1]
+ self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
+ self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
+ self.tgt_scheduler = self.tgt_msg_runner.scheduler
+ self.tgt_state_manager = self.tgt_msg_runner.state_manager
+ methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_compute_api = methods_cls.compute_api
+ self.tgt_db_inst = methods_cls.db
+
+ def test_schedule_run_instance(self):
+ # schedule_run_instance on the source should invoke the target
+ # cell's scheduler with the same context and kwargs.
+ host_sched_kwargs = {'filter_properties': {},
+ 'key1': 'value1',
+ 'key2': 'value2'}
+ self.mox.StubOutWithMock(self.tgt_scheduler, 'run_instance')
+ self.tgt_scheduler.run_instance(self.ctxt, host_sched_kwargs)
+ self.mox.ReplayAll()
+ self.src_msg_runner.schedule_run_instance(self.ctxt,
+ self.tgt_cell_name,
+ host_sched_kwargs)
+
+ def test_call_compute_api_method(self):
+ # run_compute_api_method should look up the instance in the target
+ # cell and call the named compute_api method with the remaining
+ # positional args and the kwargs, returning its result.
+ instance_uuid = 'fake_instance_uuid'
+ method_info = {'method': 'reboot',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'reboot')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn(
+ 'fake_instance')
+ self.tgt_compute_api.reboot(self.ctxt, 'fake_instance', 2, 3,
+ arg1='val1', arg2='val2').AndReturn('fake_result')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_call_compute_api_method_unknown_instance(self):
+ # Unknown instance should send a broadcast up that instance
+ # is gone.
+ instance_uuid = 'fake_instance_uuid'
+ instance = {'uuid': instance_uuid}
+ method_info = {'method': 'reboot',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ 'fake_instance_uuid').AndRaise(
+ exception.InstanceNotFound(instance_id=instance_uuid))
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ self.assertRaises(exception.InstanceNotFound,
+ response.value_or_raise)
+
+ def test_update_capabilities(self):
+ # Route up to API
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capabs = {'cap1': set(['val1', 'val2']),
+ 'cap2': set(['val3'])}
+ # The list(set([])) seems silly, but we can't assume the order
+ # of the list... This behavior should match the code we're
+ # testing... which is check that a set was converted to a list.
+ expected_capabs = {'cap1': list(set(['val1', 'val2'])),
+ 'cap2': ['val3']}
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capabilities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capabilities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.src_state_manager.get_our_capabilities().AndReturn(capabs)
+ self.tgt_state_manager.update_cell_capabilities('child-cell2',
+ expected_capabs)
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ def test_update_capacities(self):
+ # Capacities propagate up to the parent, which records them and then
+ # tells its own parents in turn.
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capacs = 'fake_capacs'
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capacities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capacities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.src_state_manager.get_our_capacities().AndReturn(capacs)
+ self.tgt_state_manager.update_cell_capacities('child-cell2',
+ capacs)
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ def test_announce_capabilities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
+
+ def test_announce_capacities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capacities(self.ctxt)
+
+
+class CellsBroadcastMethodsTestCase(test.TestCase):
+ """Test case for _BroadcastMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+
+ def setUp(self):
+ super(CellsBroadcastMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs()
+
+ def _setup_attrs(self, up=True):
+ # Build a 3-cell chain (src -> mid -> tgt). With up=True the source
+ # is the grandchild and the target is the api-cell; up=False swaps
+ # them for downward-broadcast tests.
+ mid_cell = 'child-cell2'
+ if up:
+ src_cell = 'grandchild-cell1'
+ tgt_cell = 'api-cell'
+ else:
+ src_cell = 'api-cell'
+ tgt_cell = 'grandchild-cell1'
+
+ self.src_msg_runner = fakes.get_message_runner(src_cell)
+ methods_cls = self.src_msg_runner.methods_by_type['broadcast']
+ self.src_methods_cls = methods_cls
+ self.src_db_inst = methods_cls.db
+ self.src_compute_api = methods_cls.compute_api
+
+ self.mid_msg_runner = fakes.get_message_runner(mid_cell)
+ methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
+ self.mid_methods_cls = methods_cls
+ self.mid_db_inst = methods_cls.db
+ self.mid_compute_api = methods_cls.compute_api
+
+ self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
+ methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_db_inst = methods_cls.db
+ self.tgt_compute_api = methods_cls.compute_api
+
+ def test_at_the_top(self):
+ # Only the api-cell (no parents) reports being at the top.
+ self.assertTrue(self.tgt_methods_cls._at_the_top())
+ self.assertFalse(self.mid_methods_cls._at_the_top())
+ self.assertFalse(self.src_methods_cls._at_the_top())
+
+ def test_instance_update_at_top(self):
+ # Only the top cell should write the update; joined fields are
+ # stripped, system_metadata is flattened to a dict, info_cache is
+ # updated separately, and cell_name is set to the full routing path.
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'uuid': 'fake_uuid',
+ 'security_groups': 'fake',
+ 'instance_type': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+ expected_sys_metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ expected_info_cache = {'other': 'moo'}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
+ expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
+ 'other': 'meow',
+ 'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'instance_info_cache_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'instance_info_cache_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'instance_info_cache_update')
+ self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
+ expected_instance,
+ update_cells=False)
+ self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
+ expected_info_cache,
+ update_cells=False)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
+ self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
+ update_cells=False)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
+
+ def test_instance_hard_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api, 'delete')
+
+ self.mox.StubOutWithMock(self.mid_compute_api, 'delete')
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'delete')
+
+ self.mid_compute_api.delete(self.ctxt, instance)
+ self.tgt_compute_api.delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, 'hard')
+
+ def test_instance_soft_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete')
+
+ self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete')
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete')
+
+ self.mid_compute_api.soft_delete(self.ctxt, instance)
+ self.tgt_compute_api.soft_delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, 'soft')
+
+ def test_instance_fault_create_at_top(self):
+ # The 'id' key must be stripped before the fault is re-created at
+ # the top cell.
+ fake_instance_fault = {'id': 1,
+ 'other stuff': 2,
+ 'more stuff': 3}
+ expected_instance_fault = {'other stuff': 2,
+ 'more stuff': 3}
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_fault_create')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_fault_create')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_fault_create')
+ self.tgt_db_inst.instance_fault_create(self.ctxt,
+ expected_instance_fault)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_fault_create_at_top(self.ctxt,
+ fake_instance_fault)
+
+ def test_bw_usage_update_at_top(self):
+ # Bandwidth usage info is forwarded verbatim as keyword args to the
+ # top cell's bw_usage_update.
+ fake_bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_last_ctr_in',
+ 'last_ctr_out': 'fake_last_ctr_out',
+ 'last_refreshed': 'fake_last_refreshed'}
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
+ self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
+ fake_bw_update_info)
+
+ def test_sync_instances(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ project_id = 'fake_project_id'
+ updated_since_raw = 'fake_updated_since_raw'
+ updated_since_parsed = 'fake_updated_since_parsed'
+ deleted = 'fake_deleted'
+
+ instance1 = dict(uuid='fake_uuid1', deleted=False)
+ instance2 = dict(uuid='fake_uuid2', deleted=True)
+ fake_instances = [instance1, instance2]
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.mox.StubOutWithMock(timeutils, 'parse_isotime')
+ self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
+
+ # Middle cell.
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn([])
+
+ # Bottom/Target cell
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn(fake_instances)
+ self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.sync_instances(self.ctxt,
+ project_id, updated_since_raw, deleted)
diff --git a/nova/tests/cells/test_cells_rpc_driver.py b/nova/tests/cells/test_cells_rpc_driver.py
new file mode 100644
index 000000000..a44fe9376
--- /dev/null
+++ b/nova/tests/cells/test_cells_rpc_driver.py
@@ -0,0 +1,218 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPC Communication Driver
+"""
+
+from nova.cells import messaging
+from nova.cells import rpc_driver
+from nova import context
+from nova.openstack.common import cfg
+from nova.openstack.common import rpc
+from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
+from nova import test
+from nova.tests.cells import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
+ group='cells')
+
+
+class CellsRPCDriverTestCase(test.TestCase):
+ """Test case for Cells communication via RPC."""
+
+ def setUp(self):
+ super(CellsRPCDriverTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self.driver = rpc_driver.CellsRPCDriver()
+
+ def test_start_consumers(self):
+ # start_consumers() should create one fanout and one non-fanout
+ # consumer per message type on the configured queue base, wiring
+ # them through an InterCellRPCDispatcher wrapped in an RpcDispatcher.
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ rpc_consumers = []
+ rpc_conns = []
+ fake_msg_runner = fakes.get_message_runner('api-cell')
+ call_info = {}
+
+ class FakeInterCellRPCDispatcher(object):
+ # NOTE: '_self' is the fake's own instance; 'self' (closure)
+ # is the TestCase, so assertions work inside these fakes.
+ def __init__(_self, msg_runner):
+ self.assertEqual(fake_msg_runner, msg_runner)
+ call_info['intercell_dispatcher'] = _self
+
+ class FakeRPCDispatcher(object):
+ def __init__(_self, proxy_objs):
+ self.assertEqual([call_info['intercell_dispatcher']],
+ proxy_objs)
+ call_info['rpc_dispatcher'] = _self
+
+ class FakeRPCConn(object):
+ def create_consumer(_self, topic, proxy_obj, **kwargs):
+ self.assertEqual(call_info['rpc_dispatcher'], proxy_obj)
+ rpc_consumers.append((topic, kwargs))
+
+ def consume_in_thread(_self):
+ pass
+
+ def _fake_create_connection(new):
+ self.assertTrue(new)
+ fake_conn = FakeRPCConn()
+ rpc_conns.append(fake_conn)
+ return fake_conn
+
+ self.stubs.Set(rpc, 'create_connection', _fake_create_connection)
+ self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
+ FakeInterCellRPCDispatcher)
+ self.stubs.Set(rpc_dispatcher, 'RpcDispatcher', FakeRPCDispatcher)
+
+ self.driver.start_consumers(fake_msg_runner)
+
+ for message_type in ['broadcast', 'response', 'targeted']:
+ topic = 'cells.intercell42.' + message_type
+ self.assertIn((topic, {'fanout': True}), rpc_consumers)
+ self.assertIn((topic, {'fanout': False}), rpc_consumers)
+ self.assertEqual(rpc_conns, self.driver.rpc_connections)
+
+ def test_stop_consumers(self):
+ # stop_consumers() should close every connection it opened.
+ call_info = {'closed': []}
+
+ class FakeRPCConn(object):
+ def close(self):
+ call_info['closed'].append(self)
+
+ fake_conns = [FakeRPCConn() for x in xrange(5)]
+ self.driver.rpc_connections = fake_conns
+ self.driver.stop_consumers()
+ self.assertEqual(fake_conns, call_info['closed'])
+
+ def test_send_message_to_cell_cast(self):
+ # Non-fanout messages go out via cast_to_server with the target
+ # cell's server params and the serialized message.
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', 'fake', 'down', cell_state, fanout=False)
+
+ call_info = {}
+
+ def _fake_make_msg(method, **kwargs):
+ call_info['rpc_method'] = method
+ call_info['rpc_kwargs'] = kwargs
+ return 'fake-message'
+
+ def _fake_cast_to_server(*args, **kwargs):
+ call_info['cast_args'] = args
+ call_info['cast_kwargs'] = kwargs
+
+ self.stubs.Set(rpc, 'cast_to_server', _fake_cast_to_server)
+ self.stubs.Set(self.driver.intercell_rpcapi, 'make_msg',
+ _fake_make_msg)
+ self.stubs.Set(self.driver.intercell_rpcapi, 'cast_to_server',
+ _fake_cast_to_server)
+
+ self.driver.send_message_to_cell(cell_state, message)
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 'rpc_port2',
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_cast_args = (self.ctxt, expected_server_params,
+ 'fake-message')
+ expected_cast_kwargs = {'topic': 'cells.intercell.targeted'}
+ expected_rpc_kwargs = {'message': message.to_json()}
+ self.assertEqual(expected_cast_args, call_info['cast_args'])
+ self.assertEqual(expected_cast_kwargs, call_info['cast_kwargs'])
+ self.assertEqual('process_message', call_info['rpc_method'])
+ self.assertEqual(expected_rpc_kwargs, call_info['rpc_kwargs'])
+
+ def test_send_message_to_cell_fanout_cast(self):
+ # fanout=True messages must use fanout_cast_to_server instead.
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', 'fake', 'down', cell_state, fanout=True)
+
+ call_info = {}
+
+ def _fake_make_msg(method, **kwargs):
+ call_info['rpc_method'] = method
+ call_info['rpc_kwargs'] = kwargs
+ return 'fake-message'
+
+ def _fake_fanout_cast_to_server(*args, **kwargs):
+ call_info['cast_args'] = args
+ call_info['cast_kwargs'] = kwargs
+
+ self.stubs.Set(rpc, 'fanout_cast_to_server',
+ _fake_fanout_cast_to_server)
+ self.stubs.Set(self.driver.intercell_rpcapi, 'make_msg',
+ _fake_make_msg)
+ self.stubs.Set(self.driver.intercell_rpcapi,
+ 'fanout_cast_to_server', _fake_fanout_cast_to_server)
+
+ self.driver.send_message_to_cell(cell_state, message)
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 'rpc_port2',
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_cast_args = (self.ctxt, expected_server_params,
+ 'fake-message')
+ expected_cast_kwargs = {'topic': 'cells.intercell.targeted'}
+ expected_rpc_kwargs = {'message': message.to_json()}
+ self.assertEqual(expected_cast_args, call_info['cast_args'])
+ self.assertEqual(expected_cast_kwargs, call_info['cast_kwargs'])
+ self.assertEqual('process_message', call_info['rpc_method'])
+ self.assertEqual(expected_rpc_kwargs, call_info['rpc_kwargs'])
+
+ def test_rpc_topic_uses_message_type(self):
+ # The topic suffix must come from the message's message_type.
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', 'fake', 'down', fanout=True)
+ message.message_type = 'fake-message-type'
+
+ call_info = {}
+
+ def _fake_fanout_cast_to_server(*args, **kwargs):
+ call_info['topic'] = kwargs.get('topic')
+
+ self.stubs.Set(self.driver.intercell_rpcapi,
+ 'fanout_cast_to_server', _fake_fanout_cast_to_server)
+
+ self.driver.send_message_to_cell(cell_state, message)
+ self.assertEqual('cells.intercell42.fake-message-type',
+ call_info['topic'])
+
+ def test_process_message(self):
+ # The dispatcher should deserialize the JSON payload via the
+ # runner's message_from_json and then process the resulting message.
+ msg_runner = fakes.get_message_runner('api-cell')
+ dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', 'fake', 'down', fanout=True)
+
+ call_info = {}
+
+ def _fake_message_from_json(json_message):
+ call_info['json_message'] = json_message
+ self.assertEqual(message.to_json(), json_message)
+ return message
+
+ def _fake_process():
+ call_info['process_called'] = True
+
+ self.stubs.Set(msg_runner, 'message_from_json',
+ _fake_message_from_json)
+ self.stubs.Set(message, 'process', _fake_process)
+
+ dispatcher.process_message(self.ctxt, message.to_json())
+ self.assertEqual(message.to_json(), call_info['json_message'])
+ self.assertTrue(call_info['process_called'])
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
new file mode 100644
index 000000000..5e045aca9
--- /dev/null
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -0,0 +1,226 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPCAPI
+"""
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova.openstack.common import cfg
+from nova.openstack.common import rpc
+from nova import test
+
+CONF = cfg.CONF
+CONF.import_opt('topic', 'nova.cells.opts', group='cells')
+
+
+class CellsAPITestCase(test.TestCase):
+ """Test case for cells.api interfaces."""
+
+ def setUp(self):
+ super(CellsAPITestCase, self).setUp()
+ self.fake_topic = 'fake_topic'
+ self.fake_context = 'fake_context'
+ self.flags(topic=self.fake_topic, enable=True, group='cells')
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _stub_rpc_method(self, rpc_method, result):
+ call_info = {}
+
+ def fake_rpc_method(ctxt, topic, msg, *args, **kwargs):
+ call_info['context'] = ctxt
+ call_info['topic'] = topic
+ call_info['msg'] = msg
+ return result
+
+ self.stubs.Set(rpc, rpc_method, fake_rpc_method)
+ return call_info
+
+ def _check_result(self, call_info, method, args, version=None):
+ if version is None:
+ version = self.cells_rpcapi.BASE_RPC_API_VERSION
+ self.assertEqual(self.fake_context, call_info['context'])
+ self.assertEqual(self.fake_topic, call_info['topic'])
+ self.assertEqual(method, call_info['msg']['method'])
+ self.assertEqual(version, call_info['msg']['version'])
+ self.assertEqual(args, call_info['msg']['args'])
+
+ def test_cast_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': False}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.cast_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+
+ def test_call_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+ fake_response = 'fake_response'
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': True}
+
+ call_info = self._stub_rpc_method('call', fake_response)
+
+ result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+ self.assertEqual(fake_response, result)
+
+ def test_schedule_run_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.schedule_run_instance(
+ self.fake_context, arg1=1, arg2=2, arg3=3)
+
+ expected_args = {'host_sched_kwargs': {'arg1': 1,
+ 'arg2': 2,
+ 'arg3': 3}}
+ self._check_result(call_info, 'schedule_run_instance',
+ expected_args)
+
+ def test_instance_update_at_top(self):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'security_groups': 'fake',
+ 'instance_type': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_update_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_update_at_top',
+ expected_args)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake-uuid'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_destroy_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_destroy_at_top',
+ expected_args)
+
+ def test_instance_delete_everywhere(self):
+ fake_instance = {'uuid': 'fake-uuid'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_delete_everywhere(
+ self.fake_context, fake_instance,
+ 'fake-type')
+
+ expected_args = {'instance': fake_instance,
+ 'delete_type': 'fake-type'}
+ self._check_result(call_info, 'instance_delete_everywhere',
+ expected_args)
+
+ def test_instance_fault_create_at_top(self):
+ fake_instance_fault = {'id': 2,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_fault_create_at_top(
+ self.fake_context, fake_instance_fault)
+
+ expected_args = {'instance_fault': fake_instance_fault}
+ self._check_result(call_info, 'instance_fault_create_at_top',
+ expected_args)
+
+ def test_bw_usage_update_at_top(self):
+ update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
+ 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
+ 'fake_ctr_out')
+ update_kwargs = {'last_refreshed': 'fake_refreshed'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bw_usage_update_at_top(
+ self.fake_context, *update_args, **update_kwargs)
+
+ bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_ctr_in',
+ 'last_ctr_out': 'fake_ctr_out',
+ 'last_refreshed': 'fake_refreshed'}
+
+ expected_args = {'bw_update_info': bw_update_info}
+ self._check_result(call_info, 'bw_usage_update_at_top',
+ expected_args)
+
+ def test_get_cell_info_for_neighbors(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_cell_info_for_neighbors(
+ self.fake_context)
+ self._check_result(call_info, 'get_cell_info_for_neighbors', {},
+ version='1.1')
+ self.assertEqual(result, 'fake_response')
+
+ def test_sync_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+ self.cells_rpcapi.sync_instances(self.fake_context,
+ project_id='fake_project', updated_since='fake_time',
+ deleted=True)
+
+ expected_args = {'project_id': 'fake_project',
+ 'updated_since': 'fake_time',
+ 'deleted': True}
+ self._check_result(call_info, 'sync_instances', expected_args,
+ version='1.1')
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
new file mode 100644
index 000000000..15b2571b5
--- /dev/null
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsScheduler
+"""
+import time
+
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.cells import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
+
+
+class CellsSchedulerTestCase(test.TestCase):
+ """Test case for CellsScheduler class."""
+
+ def setUp(self):
+ super(CellsSchedulerTestCase, self).setUp()
+ fakes.init(self)
+ self.msg_runner = fakes.get_message_runner('api-cell')
+ self.scheduler = self.msg_runner.scheduler
+ self.state_manager = self.msg_runner.state_manager
+ self.my_cell_state = self.state_manager.get_my_state()
+ self.ctxt = context.RequestContext('fake', 'fake')
+ instance_uuids = []
+ for x in xrange(3):
+ instance_uuids.append(uuidutils.generate_uuid())
+ self.instance_uuids = instance_uuids
+ self.request_spec = {'instance_uuids': instance_uuids,
+ 'other': 'stuff'}
+
+ def test_create_instances_here(self):
+ # Just grab the first instance type
+ inst_type = db.instance_type_get(self.ctxt, 1)
+ image = {'properties': {}}
+ instance_props = {'hostname': 'meow',
+ 'display_name': 'moo',
+ 'image_ref': 'fake_image_ref',
+ 'user_id': self.ctxt.user_id,
+ 'project_id': self.ctxt.project_id}
+ request_spec = {'instance_type': inst_type,
+ 'image': image,
+ 'security_group': ['default'],
+ 'block_device_mapping': [],
+ 'instance_properties': instance_props,
+ 'instance_uuids': self.instance_uuids}
+
+ call_info = {'uuids': []}
+
+ def _fake_instance_update_at_top(_ctxt, instance):
+ call_info['uuids'].append(instance['uuid'])
+
+ self.stubs.Set(self.msg_runner, 'instance_update_at_top',
+ _fake_instance_update_at_top)
+
+ self.scheduler._create_instances_here(self.ctxt, request_spec)
+ self.assertEqual(self.instance_uuids, call_info['uuids'])
+
+ for instance_uuid in self.instance_uuids:
+ instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
+ self.assertEqual('meow', instance['hostname'])
+ self.assertEqual('moo', instance['display_name'])
+ self.assertEqual('fake_image_ref', instance['image_ref'])
+
+ def test_run_instance_selects_child_cell(self):
+ # Make sure there's no capacity info so we're sure to
+ # select a child cell
+ our_cell_info = self.state_manager.get_my_state()
+ our_cell_info.capacities = {}
+
+ call_info = {'times': 0}
+
+ orig_fn = self.msg_runner.schedule_run_instance
+
+ def msg_runner_schedule_run_instance(ctxt, target_cell,
+ host_sched_kwargs):
+ # This gets called twice. Once for our running it
+ # in this cell.. and then it'll get called when the
+ # child cell is picked. So, first time.. just run it
+ # like normal.
+ if not call_info['times']:
+ call_info['times'] += 1
+ return orig_fn(ctxt, target_cell, host_sched_kwargs)
+ call_info['ctxt'] = ctxt
+ call_info['target_cell'] = target_cell
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ self.stubs.Set(self.msg_runner, 'schedule_run_instance',
+ msg_runner_schedule_run_instance)
+
+ host_sched_kwargs = {'request_spec': self.request_spec}
+ self.msg_runner.schedule_run_instance(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ child_cells = self.state_manager.get_child_cells()
+ self.assertIn(call_info['target_cell'], child_cells)
+
+ def test_run_instance_selects_current_cell(self):
+ # Make sure there's no child cells so that we will be
+ # selected
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, request_spec):
+ call_info['ctxt'] = ctxt
+ call_info['request_spec'] = request_spec
+
+ def fake_rpc_run_instance(ctxt, **host_sched_kwargs):
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(self.scheduler.scheduler_rpcapi,
+ 'run_instance', fake_rpc_run_instance)
+
+ host_sched_kwargs = {'request_spec': self.request_spec,
+ 'other': 'stuff'}
+ self.msg_runner.schedule_run_instance(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.request_spec, call_info['request_spec'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+
+ def test_run_instance_retries_when_no_cells_avail(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ host_sched_kwargs = {'request_spec': self.request_spec}
+
+ call_info = {'num_tries': 0, 'errored_uuids': []}
+
+ def fake_run_instance(message, host_sched_kwargs):
+ call_info['num_tries'] += 1
+ raise exception.NoCellsAvailable()
+
+ def fake_sleep(_secs):
+ return
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids'].append(instance_uuid)
+
+ self.stubs.Set(self.scheduler, '_run_instance', fake_run_instance)
+ self.stubs.Set(time, 'sleep', fake_sleep)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+
+ self.msg_runner.schedule_run_instance(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+
+ self.assertEqual(8, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
+
+ def test_run_instance_on_random_exception(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ host_sched_kwargs = {'request_spec': self.request_spec}
+
+ call_info = {'num_tries': 0,
+ 'errored_uuids1': [],
+ 'errored_uuids2': []}
+
+ def fake_run_instance(message, host_sched_kwargs):
+ call_info['num_tries'] += 1
+ raise test.TestingException()
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids1'].append(instance_uuid)
+
+ def fake_instance_update_at_top(ctxt, instance):
+ self.assertEqual(vm_states.ERROR, instance['vm_state'])
+ call_info['errored_uuids2'].append(instance['uuid'])
+
+ self.stubs.Set(self.scheduler, '_run_instance', fake_run_instance)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(self.msg_runner, 'instance_update_at_top',
+ fake_instance_update_at_top)
+
+ self.msg_runner.schedule_run_instance(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+ # Shouldn't retry
+ self.assertEqual(1, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
diff --git a/nova/tests/cells/test_cells_utils.py b/nova/tests/cells/test_cells_utils.py
new file mode 100644
index 000000000..84f60a796
--- /dev/null
+++ b/nova/tests/cells/test_cells_utils.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Utility methods
+"""
+import inspect
+import random
+
+from nova.cells import utils as cells_utils
+from nova import db
+from nova import test
+
+
+class CellsUtilsTestCase(test.TestCase):
+ """Test case for Cells utility methods."""
+ def test_get_instances_to_sync(self):
+ fake_context = 'fake_context'
+
+ call_info = {'get_all': 0, 'shuffle': 0}
+
+ def random_shuffle(_list):
+ call_info['shuffle'] += 1
+
+ def instance_get_all_by_filters(context, filters,
+ sort_key, sort_order):
+ self.assertEqual(context, fake_context)
+ self.assertEqual(sort_key, 'deleted')
+ self.assertEqual(sort_order, 'asc')
+ call_info['got_filters'] = filters
+ call_info['get_all'] += 1
+ return ['fake_instance1', 'fake_instance2', 'fake_instance3']
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ instance_get_all_by_filters)
+ self.stubs.Set(random, 'shuffle', random_shuffle)
+
+ instances = cells_utils.get_instances_to_sync(fake_context)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 1)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 0)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 2)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ updated_since='fake-updated-since')
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 3)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since'})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ project_id='fake-project',
+ updated_since='fake-updated-since', shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+        self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 4)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since',
+ 'project_id': 'fake-project'})
+ self.assertEqual(call_info['shuffle'], 2)
diff --git a/nova/tests/cert/test_rpcapi.py b/nova/tests/cert/test_rpcapi.py
index 58b07ff75..dce325354 100644
--- a/nova/tests/cert/test_rpcapi.py
+++ b/nova/tests/cert/test_rpcapi.py
@@ -20,12 +20,11 @@ Unit Tests for nova.cert.rpcapi
from nova.cert import rpcapi as cert_rpcapi
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class CertRpcAPITestCase(test.TestCase):
@@ -33,8 +32,12 @@ class CertRpcAPITestCase(test.TestCase):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
+
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
self.call_ctxt = None
self.call_topic = None
@@ -54,7 +57,7 @@ class CertRpcAPITestCase(test.TestCase):
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
- self.assertEqual(self.call_topic, FLAGS.cert_topic)
+ self.assertEqual(self.call_topic, CONF.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
@@ -84,3 +87,7 @@ class CertRpcAPITestCase(test.TestCase):
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
+
+ def test_get_backdoor_port(self):
+ self._test_cert_api('get_backdoor_port', host='fake_host',
+ version='1.1')
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py
index 9c404fbc0..ab24bc7b9 100644
--- a/nova/tests/compute/fake_resource_tracker.py
+++ b/nova/tests/compute/fake_resource_tracker.py
@@ -19,7 +19,7 @@ from nova.compute import resource_tracker
class FakeResourceTracker(resource_tracker.ResourceTracker):
- """Version without a DB requirement"""
+ """Version without a DB requirement."""
def _create(self, context, values):
self.compute_node = values
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index f631c1665..d908c0089 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for resource tracker claims"""
+"""Tests for resource tracker claims."""
import uuid
@@ -26,15 +26,27 @@ from nova import test
LOG = logging.getLogger(__name__)
+class DummyTracker(object):
+ icalled = False
+ rcalled = False
+
+ def abort_instance_claim(self, *args, **kwargs):
+ self.icalled = True
+
+ def abort_resize_claim(self, *args, **kwargs):
+ self.rcalled = True
+
+
class ClaimTestCase(test.TestCase):
def setUp(self):
super(ClaimTestCase, self).setUp()
self.resources = self._fake_resources()
+ self.tracker = DummyTracker()
def _claim(self, **kwargs):
instance = self._fake_instance(**kwargs)
- return claims.Claim(instance, None)
+ return claims.Claim(instance, self.tracker)
def _fake_instance(self, **kwargs):
instance = {
@@ -47,6 +59,18 @@ class ClaimTestCase(test.TestCase):
instance.update(**kwargs)
return instance
+ def _fake_instance_type(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': 1,
+ 'vcpus': 1,
+ 'root_gb': 1,
+ 'ephemeral_gb': 2
+ }
+ instance_type.update(**kwargs)
+ return instance_type
+
def _fake_resources(self, values=None):
resources = {
'memory_mb': 2048,
@@ -109,17 +133,30 @@ class ClaimTestCase(test.TestCase):
self.assertFalse(claim.test(self.resources, limits))
def test_abort(self):
- instance = self._fake_instance(root_gb=10, ephemeral_gb=40)
+ claim = self._abort()
+ self.assertTrue(claim.tracker.icalled)
- def fake_abort(self):
- self._called = True
-
- self.stubs.Set(claims.Claim, 'abort', fake_abort)
+ def _abort(self):
claim = None
try:
- with claims.Claim(instance, None) as claim:
+ with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
- self.assertTrue(claim._called)
+ return claim
+
+
+class ResizeClaimTestCase(ClaimTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+ self.instance = self._fake_instance()
+
+ def _claim(self, **kwargs):
+ instance_type = self._fake_instance_type(**kwargs)
+ return claims.ResizeClaim(self.instance, instance_type, self.tracker)
+
+ def test_abort(self):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.rcalled)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index a7b1b3061..0d9f67231 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -16,13 +16,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute service"""
+"""Tests for compute service."""
import base64
import copy
import datetime
import sys
import time
+import traceback
+import uuid
import mox
@@ -36,43 +38,47 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
-from nova.openstack.common import policy as common_policy
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
+from nova.tests import matchers
from nova import utils
+from nova.virt import fake
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+
def nop_report_driver_status(self):
pass
@@ -98,15 +104,19 @@ class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
+ notifier_api._reset_drivers()
+ self.addCleanup(notifier_api._reset_drivers)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ fake.set_nodes([NODENAME])
+ self.flags(use_local=True, group='conductor')
+ self.compute = importutils.import_object(CONF.compute_manager)
# override tracker with a version that doesn't need the database:
- self.compute.resource_tracker = \
- fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver)
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
self.compute.update_available_resource(
context.get_admin_context())
@@ -117,12 +127,15 @@ class BaseTestCase(test.TestCase):
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
- return {'id': id, 'min_disk': None, 'min_ram': None,
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id',
- 'something_else': 'meow'}}
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
@@ -132,15 +145,16 @@ class BaseTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def tearDown(self):
+ ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
- instances = db.instance_get_all(self.context.elevated())
- notifier_api._reset_drivers()
+ instances = db.instance_get_all(ctxt)
for instance in instances:
- db.instance_destroy(self.context.elevated(), instance['uuid'])
+ db.instance_destroy(ctxt, instance['uuid'])
+ fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -152,6 +166,7 @@ class BaseTestCase(test.TestCase):
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -167,11 +182,11 @@ class BaseTestCase(test.TestCase):
return db.instance_create(self.context, inst)
def _create_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
return self._create_fake_instance(params, type_name=type_name)
def _create_instance_type(self, params=None):
- """Create a test instance type"""
+ """Create a test instance type."""
if not params:
params = {}
@@ -210,7 +225,7 @@ class ComputeTestCase(BaseTestCase):
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
- self.rt = self.compute.resource_tracker
+ self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -269,7 +284,7 @@ class ComputeTestCase(BaseTestCase):
@compute_manager.wrap_instance_fault
def failer(self2, context, instance_uuid):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst_uuid)
@@ -277,7 +292,7 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(called['fault_added'])
def test_create_instance_with_img_ref_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -287,12 +302,12 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
instance = instances[0]
- self.assertTrue(instance.config_drive)
+ self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -302,12 +317,12 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
instance = instances[0]
- self.assertTrue(instance.config_drive)
+ self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
- """Default of memory limit=None is unlimited"""
+ # Default of memory limit=None is unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
@@ -352,13 +367,13 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_ram(self):
- """Test passing of oversubscribed ram policy from the scheduler."""
+ # Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -385,7 +400,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -404,7 +419,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_cpu(self):
- """Test passing of oversubscribed cpu policy from the scheduler."""
+ # Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -412,7 +427,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying an amount of memory that exceeds
@@ -453,13 +468,13 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_disk(self):
- """Test passing of oversubscribed disk policy from the scheduler."""
+ # Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -485,7 +500,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -502,6 +517,23 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
+ def test_create_instance_without_node_param(self):
+ instance = self._create_fake_instance({'node': None})
+
+ self.compute.run_instance(self.context, instance=instance)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_create_instance_no_image(self):
+ # Create instance with no image provided.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
@@ -513,8 +545,8 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
instance = instances[0]
- self.assertEqual(instance.access_ip_v4, '192.168.1.100')
- self.assertEqual(instance.access_ip_v6, '2001:db8:0:1::1')
+ self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
+ self.assertEqual(instance['access_ip_v6'], '2001:db8:0:1::1')
finally:
db.instance_destroy(self.context, instance['uuid'])
@@ -527,13 +559,13 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
instance = instances[0]
- self.assertFalse(instance.access_ip_v4)
- self.assertFalse(instance.access_ip_v6)
+ self.assertFalse(instance['access_ip_v4'])
+ self.assertFalse(instance['access_ip_v6'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
- """check the persistence of the ERROR(scheduling) state"""
+ # check the persistence of the ERROR(scheduling) state.
self._create_instance(params={'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
#check state is failed even after the periodic poll
@@ -542,7 +574,7 @@ class ComputeTestCase(BaseTestCase):
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
- """ block device mapping failure test.
+ """block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, keeping the task state
@@ -562,7 +594,7 @@ class ComputeTestCase(BaseTestCase):
'task_state': None})
def test_run_instance_spawn_fail(self):
- """ spawn failure test.
+ """spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, keeping the task state"""
@@ -580,14 +612,14 @@ class ComputeTestCase(BaseTestCase):
'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
- """ spawn network deallocate test.
+ """spawn network deallocate test.
Make sure that when an instance is not found during spawn
that the network is deallocated"""
instance = self._create_instance()
def fake(*args, **kwargs):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id="fake")
self.stubs.Set(self.compute.driver, 'spawn', fake)
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -599,7 +631,7 @@ class ComputeTestCase(BaseTestCase):
self.context, instance=instance)
def test_can_terminate_on_error_state(self):
- """Make sure that the instance can be terminated in ERROR state"""
+ # Make sure that the instance can be terminated in ERROR state.
#check failed to schedule --> terminate
instance = self._create_instance(params={'vm_state': vm_states.ERROR})
self.compute.terminate_instance(self.context, instance=instance)
@@ -610,7 +642,7 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
- """Make sure it is possible to run and terminate instance"""
+ # Make sure it is possible to run and terminate instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -663,6 +695,21 @@ class ComputeTestCase(BaseTestCase):
instance['uuid'])
self.assertEqual(len(bdms), 0)
+ def test_run_terminate_no_image(self):
+ """
+ Make sure instance started without image (from volume)
+ can be terminated without issues
+ """
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
+ self.compute.terminate_instance(self.context, instance=instance)
+ instances = db.instance_get_all(self.context)
+ self.assertEqual(len(instances), 0)
+
def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -677,7 +724,9 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute._get_instance_nw_info(
mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(exception.NetworkNotFound())
+ mox.IgnoreArg()).AndRaise(
+ exception.NetworkNotFound(network_id='fake')
+ )
self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance=instance)
@@ -718,7 +767,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instances[0]['task_state'], 'deleting')
def test_run_terminate_timestamps(self):
- """Make sure timestamps are set for launched and destroyed"""
+ # Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
@@ -736,7 +785,7 @@ class ComputeTestCase(BaseTestCase):
self.assert_(instance['deleted_at'] > terminate)
def test_stop(self):
- """Ensure instance can be stopped"""
+ # Ensure instance can be stopped.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -745,7 +794,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
- """Ensure instance can be started"""
+ # Ensure instance can be started.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -756,8 +805,20 @@ class ComputeTestCase(BaseTestCase):
self.compute.start_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_stop_start_no_image(self):
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ self.compute.stop_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_ON})
+ self.compute.start_instance(self.context, instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def test_rescue(self):
- """Ensure instance can be rescued and unrescued"""
+ # Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
@@ -789,8 +850,20 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
+ def test_rescue_no_image(self):
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance(params)
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance_uuid,
+ {"task_state": task_states.RESCUING})
+ self.compute.rescue_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance_uuid,
+ {"task_state": task_states.UNRESCUING})
+ self.compute.unrescue_instance(self.context, instance=instance)
+
def test_power_on(self):
- """Ensure instance can be powered on"""
+ # Ensure instance can be powered on.
called = {'power_on': False}
@@ -809,7 +882,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_power_off(self):
- """Ensure instance can be powered off"""
+ # Ensure instance can be powered off.
called = {'power_off': False}
@@ -828,7 +901,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_pause(self):
- """Ensure instance can be paused and unpaused"""
+ # Ensure instance can be paused and unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -840,7 +913,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
- """ensure instance can be suspended and resumed"""
+ # ensure instance can be suspended and resumed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -852,7 +925,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
- """Ensure vm_state is ERROR when suspend error occurs"""
+ # Ensure vm_state is ERROR when suspend error occurs.
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
@@ -869,7 +942,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -881,11 +954,27 @@ class ComputeTestCase(BaseTestCase):
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[])
+ self.compute.terminate_instance(self.context, instance=instance)
+
+ def test_rebuild_no_image(self):
+ # Ensure instance can be rebuilt when started with no image.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance(params)
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ '', '', injected_files=[],
+ new_pass="new_password",
orig_sys_metadata=sys_metadata)
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launch_time(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -900,105 +989,119 @@ class ComputeTestCase(BaseTestCase):
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=[],
- new_pass="new_password")
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ new_pass="new_password",
+ bdms=[])
+ instance = db.instance_get_by_uuid(self.context, instance_uuid,)
self.assertEquals(cur_time, instance['launched_at'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
- def _stub_out_reboot(self, fake_net_info, fake_block_dev_info):
- def fake_reboot(driver, inst, net_info, reboot_type, block_dev_info):
- self.assertEqual(block_dev_info, fake_block_dev_info)
- self.assertEqual(net_info, fake_net_info)
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo',
- lambda x: False)
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
+ def _test_reboot(self, soft, legacy_nwinfo_driver):
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
- def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING})
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'legacy_nwinfo')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ instance = dict(uuid='fake-instance',
+ power_state='unknown')
+ updated_instance1 = dict(uuid='updated-instance1',
+ power_state='fake')
+ updated_instance2 = dict(uuid='updated-instance2',
+ power_state='fake')
+
+ fake_nw_model = network_model.NetworkInfo()
+ self.mox.StubOutWithMock(fake_nw_model, 'legacy')
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 'fake_power_state1'
+ fake_power_state2 = 'fake_power_state2'
+ reboot_type = soft and 'SOFT' or 'HARD'
+
+ # Beginning of calls we expect.
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ self.compute._instance_update(econtext, instance['uuid'],
+ power_state=fake_power_state1,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance1)
+
+ # Reboot should check the driver to see if legacy nwinfo is
+ # needed. If it is, the model's legacy() method should be
+ # called and the result passed to driver.reboot. If the
+ # driver wants the model, we pass the model.
+ self.compute.driver.legacy_nwinfo().AndReturn(legacy_nwinfo_driver)
+ if legacy_nwinfo_driver:
+ expected_nw_info = 'legacy-nwinfo'
+ fake_nw_model.legacy().AndReturn(expected_nw_info)
+ else:
+ expected_nw_info = fake_nw_model
+
+ # Annoying. driver.reboot is wrapped in a try/except, and
+ # doesn't re-raise. It eats exception generated by mox if
+ # this is called with the wrong args, so we have to hack
+ # around it.
+ reboot_call_info = {}
+ expected_call_info = {'args': (updated_instance1, expected_nw_info,
+ reboot_type, fake_block_dev_info),
+ 'kwargs': {}}
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ self.compute._get_power_state(econtext,
+ updated_instance1).AndReturn(fake_power_state2)
+ self.compute._instance_update(econtext, updated_instance1['uuid'],
+ power_state=fake_power_state2,
+ task_state=None,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance2)
+ self.compute._notify_about_instance_usage(econtext,
+ updated_instance2,
+ 'reboot.end')
- reboot_type = "SOFT"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
+ self.mox.ReplayAll()
self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
block_device_info=fake_block_dev_info,
reboot_type=reboot_type)
+ self.assertEqual(expected_call_info, reboot_call_info)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
-
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
+ def test_reboot_soft(self):
+ self._test_reboot(True, False)
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- reboot_type = "HARD"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
- block_device_info=fake_block_dev_info,
- reboot_type=reboot_type)
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
+ self._test_reboot(False, False)
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
+ def test_reboot_soft_legacy_nwinfo_driver(self):
+ self._test_reboot(True, True)
- def test_reboot_nwinfo(self):
- """Ensure instance network info is rehydrated in reboot"""
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- result = {'was_instance': []}
-
- # NOTE(danms): Beware the dragons ahead:
- # Since the _legacy_nw_info() method in manager runs inside a
- # try..except block, we can't assert from here. Further, this
- # will be run more than once during the operation we're about
- # to fire off, which means we need to make sure that it doesn't
- # fail any of the times it is run. Hence the obscurity below.
- def fake_legacy_nw_info(network_info):
- result['was_instance'].append(
- isinstance(network_info, network_model.NetworkInfo))
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_legacy_nw_info)
-
- fake_net_info = network_model.NetworkInfo([
- fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- fake_net_info_p = jsonutils.to_primitive(fake_net_info)
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info_p,
- block_device_info=fake_block_dev_info,
- reboot_type="SOFT")
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
- self.assertFalse(False in result['was_instance'])
+ def test_reboot_hard_legacy_nwinfo_driver(self):
+ self._test_reboot(False, True)
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1018,7 +1121,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
- """Test setting password while instance is rebuilding."""
+ # Test setting password while instance is rebuilding.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {
@@ -1049,7 +1152,7 @@ class ComputeTestCase(BaseTestCase):
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state):
- """Ensure expected exception is raised if set_admin_password fails"""
+ """Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
pass
@@ -1106,7 +1209,7 @@ class ComputeTestCase(BaseTestCase):
None)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
called = {'inject': False}
def fake_driver_inject_file(self2, instance, path, contents):
@@ -1125,7 +1228,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
- """Ensure we can inject network info"""
+ # Ensure we can inject network info.
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
@@ -1141,7 +1244,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
- """Ensure we can reset networking on an instance"""
+ # Ensure we can reset networking on an instance.
called = {'count': 0}
def fake_driver_reset_network(self, instance):
@@ -1160,7 +1263,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot(self):
- """Ensure instance can be snapshotted"""
+ # Ensure instance can be snapshotted.
instance = jsonutils.to_primitive(self._create_fake_instance())
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
@@ -1169,8 +1272,18 @@ class ComputeTestCase(BaseTestCase):
self.compute.snapshot_instance(self.context, name, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_snapshot_no_image(self):
+ params = {'image_ref': ''}
+ name = "myfakesnapshot"
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.IMAGE_SNAPSHOT})
+ self.compute.snapshot_instance(self.context, name, instance=instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def test_snapshot_fails(self):
- """Ensure task_state is set to None if snapshot fails"""
+ # Ensure task_state is set to None if snapshot fails.
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
@@ -1187,7 +1300,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def _assert_state(self, state_dict):
- """Assert state of VM is equal to state passed as parameter"""
+ """Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 1)
@@ -1201,7 +1314,7 @@ class ComputeTestCase(BaseTestCase):
instances[0]['power_state'])
def test_console_output(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1211,7 +1324,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1221,7 +1334,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1233,7 +1346,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_xvpvnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1243,7 +1356,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
- """Raise useful error if console type is an unrecognised string"""
+ # Raise useful error if console type is an unrecognised string.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1253,7 +1366,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
- """Raise useful error is console type is None"""
+ # Raise useful error if console type is None.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1263,7 +1376,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_diagnostics(self):
- """Make sure we can get diagnostics for an instance."""
+ # Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
@@ -1330,7 +1443,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
- """Ensure run instance generates appropriate usage notification"""
+ # Ensure run instance generates appropriate usage notification.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1347,7 +1460,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['image_name'], 'fake_name')
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], inst_ref.uuid)
+ self.assertEquals(payload['instance_id'], inst_ref['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
@@ -1356,13 +1469,13 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue(payload['launched_at'])
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_terminate_usage_notification(self):
- """Ensure terminate_instance generates correct usage notification"""
+ # Ensure terminate_instance generates correct usage notification.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1396,12 +1509,12 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('deleted_at' in payload)
- self.assertEqual(payload['deleted_at'], str(cur_time))
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
- """Ensure failure when running an instance that already exists"""
+ # Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.Invalid,
@@ -1410,8 +1523,29 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ macs = set(['01:23:45:67:89:ab'])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True))
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(instance).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance=instance)
+
def test_instance_set_to_error_on_uncaught_exception(self):
- """Test that instance is set to error state when exception is raised"""
+ # Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
@@ -1420,7 +1554,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False).AndRaise(rpc_common.RemoteError())
+ vpn=False,
+ macs=None).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -1469,12 +1604,12 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
- """When a spawn fails the network must be deallocated"""
+ # When a spawn fails the network must be deallocated.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
self.compute._setup_block_device_mapping(
- mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(rpc.common.RemoteError('', '', ''))
self.mox.ReplayAll()
@@ -1486,7 +1621,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_lock(self):
- """ensure locked instance cannot be changed"""
+ # ensure locked instance cannot be changed.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1569,7 +1704,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance["task_state"], post_task_state)
def test_state_revert(self):
- """ensure that task_state is reverted after a failed operation"""
+ # ensure that task_state is reverted after a failed operation.
actions = [
("reboot_instance", task_states.REBOOTING),
("stop_instance", task_states.POWERING_OFF),
@@ -1606,7 +1741,7 @@ class ComputeTestCase(BaseTestCase):
self._test_state_revert(*operation)
def _ensure_quota_reservations_committed(self):
- """Mock up commit of quota reservations"""
+ """Mock up commit of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations)
@@ -1614,7 +1749,7 @@ class ComputeTestCase(BaseTestCase):
return reservations
def _ensure_quota_reservations_rolledback(self):
- """Mock up rollback of quota reservations"""
+ """Mock up rollback of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations)
@@ -1622,12 +1757,21 @@ class ComputeTestCase(BaseTestCase):
return reservations
def test_finish_resize(self):
- """Contrived test to ensure finish_resize doesn't raise anything"""
+ # Contrived test to ensure finish_resize doesn't raise anything.
def fake(*args, **kwargs):
pass
+ def fake_migration_update(context, id, values):
+ # Ensure instance status updates is after the migration finish
+ migration_ref = db.migration_get(context, id)
+ instance_uuid = migration_ref['instance_uuid']
+ instance = db.instance_get_by_uuid(context, instance_uuid)
+ self.assertFalse(instance['vm_state'] == vm_states.RESIZED)
+ self.assertEqual(instance['task_state'], task_states.RESIZE_FINISH)
+
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+ self.stubs.Set(db, 'migration_update', fake_migration_update)
reservations = self._ensure_quota_reservations_committed()
@@ -1649,7 +1793,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
- """Make sure we don't leave the instance in RESIZE on error"""
+ # Make sure we don't leave the instance in RESIZE on error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -1683,7 +1827,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_rebuild_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1711,12 +1855,13 @@ class ComputeTestCase(BaseTestCase):
image_ref, new_image_ref,
injected_files=[],
new_pass=password,
- orig_sys_metadata=orig_sys_metadata)
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=[])
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
- image_ref_url = utils.generate_image_url(image_ref)
- new_image_ref_url = utils.generate_image_url(new_image_ref)
+ image_ref_url = glance.generate_image_url(image_ref)
+ new_image_ref_url = glance.generate_image_url(new_image_ref)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
msg = test_notifier.NOTIFICATIONS[0]
@@ -1743,13 +1888,13 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
- self.assertEqual(payload['launched_at'], str(cur_time))
+ self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
self.assertEquals(payload['image_ref_url'], new_image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1759,21 +1904,23 @@ class ComputeTestCase(BaseTestCase):
new_type_id = new_type['id']
self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
- db.instance_update(self.context, instance["uuid"],
+ new_instance = db.instance_update(self.context, instance['uuid'],
+ {'host': 'foo'})
+ new_instance = jsonutils.to_primitive(new_instance)
+ db.instance_update(self.context, new_instance["uuid"],
{"task_state": task_states.RESIZE_PREP})
- self.compute.prep_resize(self.context, instance=instance,
+ self.compute.prep_resize(self.context, instance=new_instance,
instance_type=new_type, image={})
migration_ref = db.migration_get_by_instance_and_status(
- self.context.elevated(), instance['uuid'], 'pre-migrating')
- self.compute.resize_instance(self.context, instance=instance,
+ self.context.elevated(), new_instance['uuid'], 'pre-migrating')
+ self.compute.resize_instance(self.context, instance=new_instance,
migration=migration_ref, image={}, instance_type=new_type)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
- disk_info={}, image={}, instance=instance)
+ disk_info={}, image={}, instance=new_instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
msg = test_notifier.NOTIFICATIONS[0]
@@ -1786,20 +1933,20 @@ class ComputeTestCase(BaseTestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance['uuid'])
+ self.assertEquals(payload['instance_id'], new_instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.small')
self.assertEquals(str(payload['instance_type_id']), str(new_type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
- self.assertEqual(payload['launched_at'], str(cur_time))
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(instance))
+ instance=jsonutils.to_primitive(new_instance))
def test_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1809,12 +1956,14 @@ class ComputeTestCase(BaseTestCase):
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
- db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
+ new_instance = db.instance_update(self.context, instance['uuid'],
+ {'host': 'foo'})
+ new_instance = jsonutils.to_primitive(new_instance)
instance_type = instance_types.get_default_instance_type()
- self.compute.prep_resize(self.context, instance=instance,
+ self.compute.prep_resize(self.context, instance=new_instance,
instance_type=instance_type, image={})
db.migration_get_by_instance_and_status(self.context.elevated(),
- instance['uuid'],
+ new_instance['uuid'],
'pre-migrating')
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
@@ -1831,19 +1980,21 @@ class ComputeTestCase(BaseTestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance['uuid'])
+ self.assertEquals(payload['instance_id'], new_instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
- image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance=instance)
+ self.compute.terminate_instance(self.context, instance=new_instance)
- def test_prep_resize_instance_migration_error(self):
- """Ensure prep_resize raise a migration error"""
+ def test_prep_resize_instance_migration_error_on_same_host(self):
+ """Ensure prep_resize raise a migration error if destination is set on
+ the same source host and allow_resize_to_same_host is false
+ """
self.flags(host="foo", allow_resize_to_same_host=False)
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1862,8 +2013,28 @@ class ComputeTestCase(BaseTestCase):
reservations=reservations)
self.compute.terminate_instance(self.context, instance=new_instance)
+ def test_prep_resize_instance_migration_error_on_none_host(self):
+ """Ensure prep_resize raise a migration error if destination host is
+ not defined
+ """
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ reservations = self._ensure_quota_reservations_rolledback()
+
+ self.compute.run_instance(self.context, instance=instance)
+ new_instance = db.instance_update(self.context, instance['uuid'],
+ {'host': None})
+ new_instance = jsonutils.to_primitive(new_instance)
+ instance_type = instance_types.get_default_instance_type()
+
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=new_instance,
+ instance_type=instance_type, image={},
+ reservations=reservations)
+ self.compute.terminate_instance(self.context, instance=new_instance)
+
def test_resize_instance_driver_error(self):
- """Ensure instance status set to Error on resize error"""
+ # Ensure instance status set to Error on resize error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -1877,57 +2048,66 @@ class ComputeTestCase(BaseTestCase):
reservations = self._ensure_quota_reservations_rolledback()
self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
- self.compute.prep_resize(self.context, instance=instance,
+ new_instance = db.instance_update(self.context, instance['uuid'],
+ {'host': 'foo'})
+ new_instance = jsonutils.to_primitive(new_instance)
+ self.compute.prep_resize(self.context, instance=new_instance,
instance_type=instance_type, image={},
reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(
- self.context.elevated(), instance['uuid'], 'pre-migrating')
+ self.context.elevated(), new_instance['uuid'], 'pre-migrating')
- db.instance_update(self.context, instance['uuid'],
+ db.instance_update(self.context, new_instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
#verify
self.assertRaises(test.TestingException, self.compute.resize_instance,
- self.context, instance=instance,
+ self.context, instance=new_instance,
migration=migration_ref, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ instance = db.instance_get_by_uuid(self.context, new_instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
- """Ensure instance can be migrated/resized"""
+ # Ensure instance can be migrated/resized.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'host': 'foo'})
- self.compute.prep_resize(self.context, instance=instance,
+ new_instance = db.instance_update(self.context, instance['uuid'],
+ {'host': 'foo'})
+ new_instance = jsonutils.to_primitive(new_instance)
+ self.compute.prep_resize(self.context, instance=new_instance,
instance_type=instance_type, image={})
migration_ref = db.migration_get_by_instance_and_status(
- self.context.elevated(), instance['uuid'], 'pre-migrating')
- db.instance_update(self.context, instance['uuid'],
+ self.context.elevated(), new_instance['uuid'], 'pre-migrating')
+ db.instance_update(self.context, new_instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
- self.compute.resize_instance(self.context, instance=instance,
+ self.compute.resize_instance(self.context, instance=new_instance,
migration=migration_ref, image={},
instance_type=jsonutils.to_primitive(instance_type))
- inst = db.instance_get_by_uuid(self.context, instance['uuid'])
+ inst = db.instance_get_by_uuid(self.context, new_instance['uuid'])
self.assertEqual(migration_ref['dest_compute'], inst['host'])
self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(instance))
+ instance=jsonutils.to_primitive(inst))
def test_finish_revert_resize(self):
- """Ensure that the flavor is reverted to the original on revert"""
+ # Ensure that the flavor is reverted to the original on revert.
def fake(*args, **kwargs):
pass
+ def fake_finish_revert_migration_driver(*args, **kwargs):
+ # Confirm the instance uses the old type in finish_revert_resize
+ inst = args[0]
+ self.assertEqual(inst['instance_type']['flavorid'], '1')
+
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
- self.stubs.Set(self.compute.driver, 'finish_revert_migration', fake)
+ self.stubs.Set(self.compute.driver, 'finish_revert_migration',
+ fake_finish_revert_migration_driver)
reservations = self._ensure_quota_reservations_committed()
@@ -2027,7 +2207,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_instance_handles_migration_error(self):
- """Ensure vm_state is ERROR when error occurs"""
+ # Ensure vm_state is ERROR when error occurs.
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
@@ -2040,7 +2220,9 @@ class ComputeTestCase(BaseTestCase):
instance_type = instance_types.get_default_instance_type()
self.compute.run_instance(self.context, instance=inst_ref)
- db.instance_update(self.context, inst_ref['uuid'], {'host': 'foo'})
+ inst_ref = db.instance_update(self.context, inst_ref['uuid'],
+ {'host': 'foo'})
+ inst_ref = jsonutils.to_primitive(inst_ref)
self.compute.prep_resize(self.context, instance=inst_ref,
instance_type=instance_type,
image={}, reservations=reservations)
@@ -2059,25 +2241,30 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_check_can_live_migrate_source_works_correctly(self):
- """Confirm check_can_live_migrate_source works on positive path"""
+ # Confirm check_can_live_migrate_source works on positive path.
+ def fake_method(*args, **kwargs):
+ return {}
+ self.stubs.Set(self.compute.driver, 'check_can_live_migrate_source',
+ fake_method)
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
self.mox.StubOutWithMock(db, 'instance_get')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_can_live_migrate_source')
-
dest_check_data = {"test": "data"}
- self.compute.driver.check_can_live_migrate_source(self.context,
- inst_ref,
- dest_check_data)
self.mox.ReplayAll()
- self.compute.check_can_live_migrate_source(self.context,
- dest_check_data=dest_check_data, instance=inst_ref)
+ ret = self.compute.check_can_live_migrate_source(self.context,
+ dest_check_data=dest_check_data,
+ instance=inst_ref)
+ self.assertTrue(type(ret) == dict)
def test_check_can_live_migrate_destination_works_correctly(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination works on positive path.
+ def fake_method(*args, **kwargs):
+ return {}
+ self.stubs.Set(self.compute.compute_rpcapi,
+ 'check_can_live_migrate_source',
+ fake_method)
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
compute_info = {"compute": "info"}
@@ -2086,16 +2273,14 @@ class ComputeTestCase(BaseTestCase):
'_get_compute_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'check_can_live_migrate_source')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination_cleanup')
- dest_check_data = {"test": "data"}
+ dest_check_data = {"test": "data", "migrate_data": {"test": "data"}}
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2106,12 +2291,13 @@ class ComputeTestCase(BaseTestCase):
self.context, dest_check_data)
self.mox.ReplayAll()
- self.compute.check_can_live_migrate_destination(self.context,
+ ret = self.compute.check_can_live_migrate_destination(self.context,
block_migration=True, disk_over_commit=False,
instance=inst_ref)
+ self.assertTrue(type(ret) == dict)
+ self.assertTrue("test" in ret)
def test_check_can_live_migrate_destination_fails_dest_check(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
compute_info = {"compute": "info"}
@@ -2124,7 +2310,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2137,7 +2323,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_check_can_live_migrate_destination_fails_source(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination works on positive path.
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
compute_info = {"compute": "info"}
@@ -2155,7 +2341,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2172,7 +2358,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
- """Confirm raising exception if instance doesn't have fixed_ip."""
+ # Confirm raising exception if instance doesn't have fixed_ip.
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -2182,7 +2368,7 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
def test_pre_live_migration_works_correctly(self):
- """Confirm setup_compute_volume is called when volume is mounted."""
+ # Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -2199,6 +2385,7 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
+ mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute.driver,
'ensure_filtering_rules_for_instance')
@@ -2207,83 +2394,72 @@ class ComputeTestCase(BaseTestCase):
# start test
self.mox.ReplayAll()
- ret = self.compute.pre_live_migration(c, instance=instance)
+ migrate_data = {'is_shared_storage': False}
+ ret = self.compute.pre_live_migration(c, instance=instance,
+ block_migration=False,
+ migrate_data=migrate_data)
self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, instance['uuid'])
- def test_live_migration_dest_raises_exception(self):
- """Confirm exception when pre_live_migration fails."""
- # creating instance testdata
- instance_ref = self._create_fake_instance({'host': 'dummy'})
- instance = jsonutils.to_primitive(instance_ref)
- inst_uuid = instance['uuid']
- inst_id = instance['id']
-
+ def test_live_migration_exception_rolls_back(self):
+ # Confirm exception when pre_live_migration fails.
c = context.get_admin_context()
- topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
-
- # creating volume testdata
- volume_id = 'fake'
- values = {'instance_uuid': inst_uuid, 'device_name': '/dev/vdc',
- 'delete_on_termination': False, 'volume_id': volume_id}
- db.block_device_mapping_create(c, values)
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ src_host = 'fake-src-host'
+ dest_host = 'fake-dest-host'
+ instance = dict(uuid='fake_instance', host=src_host,
+ name='fake-name')
+ updated_instance = 'fake_updated_instance'
+ fake_bdms = [dict(volume_id='vol1-id'), dict(volume_id='vol2-id')]
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
-
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
- self.compute.driver.get_instance_disk_info(instance['name'])
-
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
- self.compute.compute_rpcapi.pre_live_migration(c,
- mox.IsA(instance), True, None, instance['host']).AndRaise(
- rpc.common.RemoteError('', '', ''))
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdms')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'remove_volume_connection')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'rollback_live_migration_at_destination')
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.MIGRATING})
- # mocks for rollback
- rpc.call(c, 'network', {'method': 'setup_networks_on_host',
- 'args': {'instance_id': inst_id,
- 'host': self.compute.host,
- 'teardown': False},
- 'version': '1.0'}, None)
- rpcinst = jsonutils.to_primitive(
- db.instance_get_by_uuid(self.context, instance['uuid']))
- rpc.call(c, topic,
- {"method": "remove_volume_connection",
- "args": {'instance': rpcinst,
- 'volume_id': volume_id},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
- None)
- rpc.cast(c, topic,
- {"method": "rollback_live_migration_at_destination",
- "args": {'instance': rpcinst},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.compute.driver.get_instance_disk_info(
+ instance['name']).AndReturn('fake_disk')
+ self.compute.compute_rpcapi.pre_live_migration(c,
+ instance, True, 'fake_disk', dest_host,
+ None).AndRaise(test.TestingException())
+
+ self.compute._instance_update(c, instance['uuid'],
+ host=src_host, vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(c,
+ updated_instance, self.compute.host)
+ self.compute._get_instance_volume_bdms(c,
+ updated_instance).AndReturn(fake_bdms)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol1-id', dest_host)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, updated_instance, 'vol2-id', dest_host)
+ self.compute.compute_rpcapi.rollback_live_migration_at_destination(
+ c, updated_instance, dest_host)
# start test
self.mox.ReplayAll()
- self.assertRaises(rpc_common.RemoteError,
+ self.assertRaises(test.TestingException,
self.compute.live_migration,
- c, dest=instance['host'], block_migration=True,
- instance=rpcinst)
-
- # cleanup
- for bdms in db.block_device_mapping_get_all_by_instance(
- c, inst_uuid):
- db.block_device_mapping_destroy(c, bdms['id'])
- db.instance_destroy(c, inst_uuid)
+ c, dest=dest_host, block_migration=True,
+ instance=instance)
def test_live_migration_works_correctly(self):
- """Confirm live_migration() works as expected correctly."""
+ # Confirm live_migration() works as expected correctly.
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
@@ -2291,35 +2467,77 @@ class ComputeTestCase(BaseTestCase):
inst_id = instance_ref['id']
instance = jsonutils.to_primitive(db.instance_get(c, inst_id))
-
- # create
- self.mox.StubOutWithMock(rpc, 'call')
- topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
- rpc.call(c, topic,
- {"method": "pre_live_migration",
- "args": {'instance': instance,
- 'block_migration': False,
- 'disk': None},
- "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
- None)
-
# start test
self.mox.ReplayAll()
+ migrate_data = {'is_shared_storage': False}
ret = self.compute.live_migration(c, dest=instance['host'],
- instance=instance)
+ instance=instance,
+ migrate_data=migrate_data)
self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, inst_uuid)
+ def test_post_live_migration_no_shared_storage_working_correctly(self):
+ """Confirm post_live_migration() works as expected correctly
+ for non shared storage migration.
+ """
+ # Create stubs
+ result = {}
+
+ def fakedestroy(*args, **kwargs):
+ result['destroyed'] = True
+ self.stubs.Set(self.compute.driver, 'destroy', fakedestroy)
+ dest = 'desthost'
+ srchost = self.compute.host
+
+ # creating testdata
+ c = context.get_admin_context()
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance({
+ 'host': srchost,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED}))
+ inst_uuid = inst_ref['uuid']
+ inst_id = inst_ref['id']
+
+ db.instance_update(c, inst_uuid,
+ {'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(inst_ref, [])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_start')
+ migration = {'source_compute': srchost, 'dest_compute': dest, }
+ self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
+ {"method": "post_live_migration_at_destination",
+ "args": {'instance': inst_ref, 'block_migration': False},
+ "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
+ None)
+ rpc.call(c, 'network', {'method': 'setup_networks_on_host',
+ 'args': {'instance_id': inst_id,
+ 'host': self.compute.host,
+ 'teardown': True},
+ 'version': '1.0'}, None)
+ # start test
+ self.mox.ReplayAll()
+ migrate_data = {'is_shared_storage': False}
+ self.compute._post_live_migration(c, inst_ref, dest,
+ migrate_data=migrate_data)
+ self.assertTrue('destroyed' in result)
+ self.assertTrue(result['destroyed'] == True)
+
def test_post_live_migration_working_correctly(self):
- """Confirm post_live_migration() works as expected correctly."""
+ # Confirm post_live_migration() works as expected correctly.
dest = 'desthost'
- flo_addr = '1.2.1.2'
+ srchost = self.compute.host
# creating testdata
c = context.get_admin_context()
inst_ref = jsonutils.to_primitive(self._create_fake_instance({
+ 'host': srchost,
'state_description': 'migrating',
'state': power_state.PAUSED}))
inst_uuid = inst_ref['uuid']
@@ -2328,17 +2546,17 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
- fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
- 'instance_uuid': inst_ref['uuid']})
- fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
- db.floating_ip_create(c, {'address': flo_addr,
- 'fixed_ip_id': fix_ref['id']})
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_start')
+ migration = {'source_compute': srchost,
+ 'dest_compute': dest, }
+ self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, rpc.queue_get_for(c, FLAGS.compute_topic, dest),
+ rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance': inst_ref, 'block_migration': False},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
@@ -2355,17 +2573,54 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute._post_live_migration(c, inst_ref, dest)
- # make sure floating ips are rewritten to destinatioin hostname.
- flo_refs = db.floating_ip_get_all_by_host(c, dest)
- self.assertTrue(flo_refs)
- self.assertEqual(flo_refs[0]['address'], flo_addr)
+ def test_post_live_migration_at_destination(self):
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'post_live_migration_at_destination')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+
+ params = {'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED, }
+ instance = jsonutils.to_primitive(self._create_fake_instance(params))
- # cleanup
- db.instance_destroy(c, inst_uuid)
- db.floating_ip_destroy(c, flo_addr)
+ admin_ctxt = context.get_admin_context()
+ instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
+
+ self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
+ self.compute.host)
+ migration = {'source_compute': instance['host'],
+ 'dest_compute': self.compute.host, }
+ self.compute.network_api.migrate_instance_finish(admin_ctxt,
+ instance, migration)
+ fake_net_info = []
+ self.compute.driver.post_live_migration_at_destination(admin_ctxt,
+ instance,
+ fake_net_info,
+ False)
+ self.compute._get_power_state(admin_ctxt, instance).AndReturn(
+ 'fake_power_state')
+
+ updated_instance = 'fake_updated_instance'
+ self.compute._instance_update(admin_ctxt, instance['uuid'],
+ host=self.compute.host,
+ power_state='fake_power_state',
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING).AndReturn(
+ updated_instance)
+ self.compute.network_api.setup_networks_on_host(admin_ctxt,
+ updated_instance, self.compute.host)
+
+ self.mox.ReplayAll()
+
+ self.compute.post_live_migration_at_destination(admin_ctxt, instance)
def test_run_kill_vm(self):
- """Detect when a vm is terminated behind the scenes"""
+ # Detect when a vm is terminated behind the scenes.
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
@@ -2377,7 +2632,7 @@ class ComputeTestCase(BaseTestCase):
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
- instance_name = instances[0].name
+ instance_name = instances[0]['name']
self.compute.driver.test_remove_vm(instance_name)
# Force the compute manager to do its periodic poll
@@ -2391,7 +2646,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('test'))
@@ -2419,7 +2674,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault_with_remote_error(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('Remote error'))
@@ -2448,7 +2703,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault_user_error(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
@@ -2474,7 +2729,7 @@ class ComputeTestCase(BaseTestCase):
user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
expected = {
@@ -2505,10 +2760,10 @@ class ComputeTestCase(BaseTestCase):
self.flags(running_deleted_instance_timeout=3600,
running_deleted_instance_action='reap')
- self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
- self.compute.db.instance_get_all_by_host(admin_context,
- self.compute.host
- ).AndReturn([instance])
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ "instance_get_all_by_host")
+ self.compute.conductor_api.instance_get_all_by_host(
+ admin_context, self.compute.host).AndReturn([instance])
bdms = []
@@ -2530,29 +2785,55 @@ class ComputeTestCase(BaseTestCase):
self.compute.driver.list_instances().AndReturn(['herp', 'derp'])
self.compute.host = 'host'
- instance1 = mox.MockAnything()
- instance1.name = 'herp'
- instance1.deleted = True
- instance1.deleted_at = "sometimeago"
+ instance1 = {}
+ instance1['name'] = 'herp'
+ instance1['deleted'] = True
+ instance1['deleted_at'] = "sometimeago"
- instance2 = mox.MockAnything()
- instance2.name = 'derp'
- instance2.deleted = False
- instance2.deleted_at = None
+ instance2 = {}
+ instance2['name'] = 'derp'
+ instance2['deleted'] = False
+ instance2['deleted_at'] = None
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
- FLAGS.running_deleted_instance_timeout).AndReturn(True)
+ CONF.running_deleted_instance_timeout).AndReturn(True)
- self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
- self.compute.db.instance_get_all_by_host('context',
- 'host').AndReturn(
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ "instance_get_all_by_host")
+ self.compute.conductor_api.instance_get_all_by_host('context',
+ 'host').AndReturn(
[instance1,
instance2])
self.mox.ReplayAll()
val = self.compute._running_deleted_instances('context')
self.assertEqual(val, [instance1])
+ def test_get_instance_nw_info(self):
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ fake_instance = 'fake-instance'
+ fake_nw_info = network_model.NetworkInfo()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'get_instance_nw_info')
+ self.mox.StubOutWithMock(fake_nw_info, 'json')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_info_cache_update')
+
+ self.compute.network_api.get_instance_nw_info(self.context,
+ fake_instance, update_cache=False).AndReturn(fake_nw_info)
+ fake_nw_info.json().AndReturn('fake-nw-info')
+ expected_cache = {'network_info': 'fake-nw-info'}
+ self.compute.conductor_api.instance_info_cache_update(self.context,
+ fake_instance, expected_cache)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instance_nw_info(self.context,
+ fake_instance)
+ self.assertEqual(fake_nw_info, result)
+
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
@@ -2562,7 +2843,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host}
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host}
instances.append(instance_map[uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
@@ -2574,7 +2855,7 @@ class ComputeTestCase(BaseTestCase):
def fake_instance_get_by_uuid(context, instance_uuid):
if instance_uuid not in instance_map:
- raise exception.InstanceNotFound
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
return instance_map[instance_uuid]
@@ -2584,27 +2865,27 @@ class ComputeTestCase(BaseTestCase):
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
- self.assertEqual(instance, call_info['expected_instance'])
+ self.assertEqual(call_info['expected_instance'], instance)
call_info['get_nw_info'] += 1
- self.stubs.Set(db, 'instance_get_all_by_host',
+ self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stubs.Set(self.compute.conductor_api, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
+ self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 0)
- self.assertEqual(call_info['get_nw_info'], 1)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(0, call_info['get_by_uuid'])
+ self.assertEqual(1, call_info['get_nw_info'])
call_info['expected_instance'] = instances[1]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 1)
- self.assertEqual(call_info['get_nw_info'], 2)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(1, call_info['get_by_uuid'])
+ self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
instances[2]['host'] = 'not-me'
@@ -2629,6 +2910,40 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 4)
+ def test_poll_rescued_instances(self):
+ timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
+ not_timed_out_time = timeutils.utcnow()
+
+ instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESCUED,
+ 'launched_at': timed_out_time},
+ {'uuid': 'fake_uuid2', 'vm_state': vm_states.ACTIVE,
+ 'launched_at': timed_out_time},
+ {'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
+ 'launched_at': not_timed_out_time},
+ {'uuid': 'fake_uuid4', 'vm_state': vm_states.RESCUED,
+ 'launched_at': timed_out_time},
+ {'uuid': 'fake_uuid5', 'vm_state': vm_states.RESCUED,
+ 'launched_at': not_timed_out_time}]
+ unrescued_instances = {'fake_uuid1': False, 'fake_uuid4': False}
+
+ def fake_instance_get_all_by_host(context, host):
+ return instances
+
+ def fake_unrescue(self, context, instance):
+ unrescued_instances[instance['uuid']] = True
+
+ self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
+ fake_instance_get_all_by_host)
+ self.stubs.Set(compute_api.API, 'unrescue', fake_unrescue)
+
+ self.flags(rescue_timeout=60)
+ ctxt = context.get_admin_context()
+
+ self.compute._poll_rescued_instances(ctxt)
+
+ for instance in unrescued_instances.values():
+ self.assertTrue(instance)
+
def test_poll_unconfirmed_resizes(self):
instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESIZED,
'task_state': None},
@@ -2666,13 +2981,13 @@ class ComputeTestCase(BaseTestCase):
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute):
- self.assertEqual(dest_compute, FLAGS.host)
+ self.assertEqual(dest_compute, CONF.host)
return migrations
- def fake_migration_update(context, migration_id, values):
+ def fake_migration_update(context, m, status):
for migration in migrations:
- if migration['id'] == migration_id and 'status' in values:
- migration['status'] = values['status']
+ if migration['id'] == m['id']:
+ migration['status'] = status
def fake_confirm_resize(context, instance):
# raise exception for 'fake_uuid4' to check migration status
@@ -2687,7 +3002,7 @@ class ComputeTestCase(BaseTestCase):
fake_instance_get_by_uuid)
self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
- self.stubs.Set(db, 'migration_update',
+ self.stubs.Set(self.compute.conductor_api, 'migration_update',
fake_migration_update)
self.stubs.Set(self.compute.compute_api, 'confirm_resize',
fake_confirm_resize)
@@ -2728,7 +3043,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2760,7 +3075,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2793,7 +3108,7 @@ class ComputeTestCase(BaseTestCase):
#expired instances
for x in xrange(4):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2802,7 +3117,7 @@ class ComputeTestCase(BaseTestCase):
uuid = 'fake-uuid-5'
instance_map[uuid] = {
'uuid': uuid,
- 'host': FLAGS.host,
+ 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': timeutils.utcnow(),
}
@@ -2812,6 +3127,436 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(called['get_all'])
self.assertEqual(called['set_error_state'], 4)
+ def test_get_resource_tracker_fail(self):
+ self.assertRaises(exception.NovaException,
+ self.compute._get_resource_tracker,
+ 'invalidnodename')
+
+ def test_instance_update_host_check(self):
+ # make sure rt usage doesn't happen if the host or node is different
+ def fail_get(nodename):
+ raise test.TestingException(_("wrong host/node"))
+ self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
+
+ instance = self._create_fake_instance({'host': 'someotherhost'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ instance = self._create_fake_instance({'node': 'someothernode'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ params = {'host': 'someotherhost', 'node': 'someothernode'}
+ instance = self._create_fake_instance(params)
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ def test_destroy_evacuated_instances(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'host': self.compute.host}))
+ ]
+
+ # this one has already been evacuated to another host
+ evacuated_instance = self._create_fake_instance({'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_volume_block_device_info')
+ self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(fake_context).AndReturn(
+ instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_volume_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute._legacy_nw_info('fake_network_info').AndReturn(
+ 'fake_legacy_network_info')
+ self.compute.driver.destroy(evacuated_instance,
+ 'fake_legacy_network_info',
+ 'fake_bdi',
+ False)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_init_host(self):
+ our_host = self.compute.host
+ fake_context = 'fake-context'
+ startup_instances = ['inst1', 'inst2', 'inst3']
+
+ def _do_mock_calls(defer_iptables_apply):
+ self.compute.driver.init_host(host=our_host)
+ context.get_admin_context().AndReturn(fake_context)
+ self.compute.conductor_api.instance_get_all_by_host(
+ fake_context, our_host).AndReturn(startup_instances)
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_on()
+ self.compute._destroy_evacuated_instances(fake_context)
+ self.compute._init_instance(fake_context, startup_instances[0])
+ self.compute._init_instance(fake_context, startup_instances[1])
+ self.compute._init_instance(fake_context, startup_instances[2])
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_off()
+ self.compute._report_driver_status(fake_context)
+ self.compute.publish_service_capabilities(fake_context)
+
+ self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_on')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_off')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.compute,
+ '_destroy_evacuated_instances')
+ self.mox.StubOutWithMock(self.compute,
+ '_init_instance')
+ self.mox.StubOutWithMock(self.compute,
+ '_report_driver_status')
+ self.mox.StubOutWithMock(self.compute,
+ 'publish_service_capabilities')
+
+ # Test with defer_iptables_apply
+ self.flags(defer_iptables_apply=True)
+ _do_mock_calls(True)
+
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ self.mox.VerifyAll()
+
+ # Test without defer_iptables_apply
+ self.mox.ResetAll()
+ self.flags(defer_iptables_apply=False)
+ _do_mock_calls(False)
+
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_get_instances_on_driver(self):
+ fake_context = context.get_admin_context()
+
+ driver_instances = []
+ for x in xrange(10):
+ instance = dict(uuid=uuidutils.generate_uuid())
+ driver_instances.append(instance)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_by_uuid')
+
+ self.compute.driver.list_instance_uuids().AndReturn(
+ [inst['uuid'] for inst in driver_instances])
+ for x in xrange(len(driver_instances)):
+ self.compute.conductor_api.instance_get_by_uuid(fake_context,
+ driver_instances[x]['uuid']).AndReturn(
+ driver_instances[x])
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instances_on_driver(fake_context)
+ self.assertEqual(driver_instances, result)
+
+ def test_get_instances_on_driver_fallback(self):
+ # Test getting instances when driver doesn't support
+ # 'list_instance_uuids'
+ fake_context = context.get_admin_context()
+
+ all_instances = []
+ driver_instances = []
+ for x in xrange(10):
+ instance = dict(name=uuidutils.generate_uuid())
+ if x % 2:
+ driver_instances.append(instance)
+ all_instances.append(instance)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instances')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_get_all')
+
+ self.compute.driver.list_instance_uuids().AndRaise(
+ NotImplementedError())
+ self.compute.driver.list_instances().AndReturn(
+ [inst['name'] for inst in driver_instances])
+ self.compute.conductor_api.instance_get_all(
+ fake_context).AndReturn(all_instances)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instances_on_driver(fake_context)
+ self.assertEqual(driver_instances, result)
+
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'someotherhost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # make sure instance is updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], dest)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'srchost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=False)
+
+ # make sure instance was not updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], 'srchost')
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+
+ # creating testdata
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+
+ volume_id = 'fake'
+ values = {'instance_uuid': inst_ref['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+
+ admin = context.get_admin_context()
+ db.block_device_mapping_create(admin, values)
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # make sure volumes attach, detach are called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
+ self.compute._setup_block_device_mapping(mox.IsA(admin),
+ mox.IsA(inst_ref),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ self.compute.rebuild_instance(admin, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ admin, inst_uuid):
+ db.block_device_mapping_destroy(admin, bdms['id'])
+ db.instance_destroy(admin, inst_uuid)
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
+ mox.IgnoreArg(), None,
+ mox.IgnoreArg(), mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)"""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def set_shared_storage(instance):
+ return False
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
+ mox.IsA(fake_image), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass='newpass',
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=False)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raise an exception."""
+
+ # creating testdata
+ c = self.context.elevated()
+ inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
+ dest = self.compute.host
+
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ dest = self.compute.host
+
+ self.compute.run_instance(self.context, instance=instance)
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
+
class ComputeAPITestCase(BaseTestCase):
@@ -2835,6 +3580,14 @@ class ComputeAPITestCase(BaseTestCase):
'ramdisk_id': 'fake_ramdisk_id'},
}
+ def fake_show(obj, context, image_id):
+ if image_id:
+ return self.fake_image
+ else:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ self.fake_show = fake_show
+
def _run_instance(self, params=None):
instance = jsonutils.to_primitive(self._create_fake_instance(params))
instance_uuid = instance['uuid']
@@ -2845,98 +3598,90 @@ class ComputeAPITestCase(BaseTestCase):
return instance, instance_uuid
def test_create_with_too_little_ram(self):
- """Test an instance type with too little memory"""
+ # Test an instance type with too little memory.
inst_type = instance_types.get_default_instance_type()
inst_type['memory_mb'] = 1
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_ram'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeMemoryTooSmall,
- self.compute_api.create, self.context, inst_type, None)
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
+ inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
- """Test an instance type with too little disk space"""
+ # Test an instance type with too little disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_disk'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_disk'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
- self.compute_api.create, self.context, inst_type, None)
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
# Now increase the inst_type disk space and make sure all is fine.
inst_type['root_gb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
+ inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
- """Test an instance type with just enough ram and disk space"""
+ # Test an instance type with just enough ram and disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 2
inst_type['memory_mb'] = 2
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_ram'] = 2
- img['min_disk'] = 2
- img['name'] = 'fake_name'
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_ram'] = 2
+ self.fake_image['min_disk'] = 2
+ self.fake_image['name'] = 'fake_name'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
+ inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
- """Test an instance type with no min_ram or min_disk"""
+ # Test an instance type with no min_ram or min_disk.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
inst_type['memory_mb'] = 1
- def fake_show(*args):
- return copy.copy(self.fake_image)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
+ inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_instance_defaults_display_name(self):
- """Verify that an instance cannot be created without a display_name."""
+ # Verify that an instance gets a default display_name when none is supplied.
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
- instance_types.get_default_instance_type(), None, **instance)
+ instance_types.get_default_instance_type(),
+ 'fake-image-uuid', **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
- """Make sure image properties are copied into system metadata."""
+ # Make sure image properties are copied into system metadata.
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
- image_href=None)
+ image_href='fake-image-uuid')
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
@@ -2952,7 +3697,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
- """Make sure create associates security groups"""
+ # Make sure create associates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
@@ -2963,7 +3708,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(len(db.security_group_get_by_instance(
self.context, ref[0]['id'])), 1)
group = db.security_group_get(self.context, group['id'])
- self.assert_(len(group.instances) == 1)
+ self.assert_(len(group['instances']) == 1)
finally:
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['uuid'])
@@ -2982,55 +3727,46 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(self.context)))
def test_create_with_large_user_data(self):
- """Test an instance type with too much user data."""
+ # Test an instance type with too much user data.
inst_type = instance_types.get_default_instance_type()
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_ram'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataTooLarge,
- self.compute_api.create, self.context, inst_type, None,
- user_data=('1' * 65536))
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
- """Test an instance type with malformed user data."""
+ # Test an instance type with malformed user data.
inst_type = instance_types.get_default_instance_type()
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_ram'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataMalformed,
- self.compute_api.create, self.context, inst_type, None,
- user_data='banana')
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
- """Test an instance type with ok much user data."""
+ # Test an instance type with an acceptable amount of user data.
inst_type = instance_types.get_default_instance_type()
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['min_ram'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
# NOTE(mikal): a string of length 48510 encodes to 65532 characters of
# base64
(refs, resv_id) = self.compute_api.create(
- self.context, inst_type, None,
+ self.context, inst_type, self.fake_image['id'],
user_data=base64.encodestring('1' * 48510))
db.instance_destroy(self.context, refs[0]['uuid'])
def test_default_hostname_generator(self):
- fake_uuids = [str(utils.gen_uuid()) for x in xrange(4)]
+ fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
orig_populate = self.compute_api._populate_instance_for_create
@@ -3056,7 +3792,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
- """Make sure destroying disassociates security groups"""
+ # Make sure destroying disassociates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3067,12 +3803,12 @@ class ComputeAPITestCase(BaseTestCase):
try:
db.instance_destroy(self.context, ref[0]['uuid'])
group = db.security_group_get(self.context, group['id'])
- self.assert_(len(group.instances) == 0)
+ self.assert_(len(group['instances']) == 0)
finally:
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
- """Make sure destroying security groups disassociates instances"""
+ # Make sure destroying security groups disassociates instances.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3086,7 +3822,7 @@ class ComputeAPITestCase(BaseTestCase):
admin_deleted_context = context.get_admin_context(
read_deleted="only")
group = db.security_group_get(admin_deleted_context, group['id'])
- self.assert_(len(group.instances) == 0)
+ self.assert_(len(group['instances']) == 0)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
@@ -3158,7 +3894,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.compute_api.delete(self.context, instance)
@@ -3169,7 +3905,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_in_resized(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance['vm_state'] = vm_states.RESIZED
@@ -3193,7 +3929,7 @@ class ComputeAPITestCase(BaseTestCase):
old_time = datetime.datetime(2012, 4, 1)
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
timeutils.set_time_override(old_time)
self.compute_api.delete(self.context, instance)
timeutils.clear_time_override()
@@ -3212,19 +3948,19 @@ class ComputeAPITestCase(BaseTestCase):
def test_repeated_delete_quota(self):
in_use = {'instances': 1}
- def fake_reserve(context, **deltas):
+ def fake_reserve(context, expire=None, project_id=None, **deltas):
return dict(deltas.iteritems())
self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
- def fake_commit(context, deltas):
+ def fake_commit(context, deltas, project_id=None):
for k, v in deltas.iteritems():
in_use[k] = in_use.get(k, 0) + v
self.stubs.Set(QUOTAS, 'commit', fake_commit)
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.compute_api.delete(self.context, instance)
self.compute_api.delete(self.context, instance)
@@ -3244,7 +3980,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_handles_host_setting_race_condition(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance['host'] = None # make it think host was never set
self.compute_api.delete(self.context, instance)
@@ -3255,7 +3991,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_fail(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
@@ -3268,10 +4004,11 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.soft_delete(self.context, instance)
@@ -3283,7 +4020,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft_fail(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
@@ -3296,10 +4033,11 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft_rollback(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
- nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg())
+ nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg(),
+ project_id=mox.IgnoreArg())
self.mox.ReplayAll()
def fail(*args, **kwargs):
@@ -3316,9 +4054,9 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_force_delete(self):
- """Ensure instance can be deleted after a soft delete"""
+ # Ensure instance can be deleted after a soft delete.
instance = jsonutils.to_primitive(self._create_fake_instance(params={
- 'host': FLAGS.host}))
+ 'host': CONF.host}))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3339,7 +4077,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], task_states.DELETING)
def test_suspend(self):
- """Ensure instance can be suspended"""
+ # Ensure instance can be suspended.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3354,7 +4092,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_resume(self):
- """Ensure instance can be resumed (if suspended)"""
+ # Ensure instance can be resumed (if suspended).
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -3371,7 +4109,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_pause(self):
- """Ensure instance can be paused"""
+ # Ensure instance can be paused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3386,7 +4124,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_unpause(self):
- """Ensure instance can be unpaused"""
+ # Ensure instance can be unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3408,9 +4146,9 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_restore(self):
- """Ensure instance can be restored from a soft delete"""
+ # Ensure instance can be restored from a soft delete.
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.soft_delete(self.context, instance)
@@ -3423,8 +4161,12 @@ class ComputeAPITestCase(BaseTestCase):
{'vm_state': vm_states.SOFT_DELETED,
'task_state': None})
+ # Ensure quotas are committed
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
+ if self.__class__.__name__ == 'CellsComputeAPITestCase':
+ # Called a 2nd time (for the child cell) when testing cells
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.restore(self.context, instance)
@@ -3479,15 +4221,24 @@ class ComputeAPITestCase(BaseTestCase):
'preserved': 'preserve this!'})
db.instance_destroy(self.context, instance['uuid'])
+ def test_rebuild_no_image(self):
+ instance = jsonutils.to_primitive(
+ self._create_fake_instance(params={'image_ref': ''}))
+ instance_uuid = instance['uuid']
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance=instance)
+ self.compute_api.rebuild(self.context, instance, '', 'new_password')
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
+
def _stub_out_reboot(self, device_name):
def fake_reboot_instance(rpcapi, context, instance,
block_device_info,
- network_info,
reboot_type):
self.assertEqual(
block_device_info['block_device_mapping'][0]['mount_device'],
device_name)
- self.assertEqual(network_info[0]['network']['bridge'], 'fake_br1')
self.stubs.Set(nova.compute.rpcapi.ComputeAPI, 'reboot_instance',
fake_reboot_instance)
@@ -3495,7 +4246,7 @@ class ComputeAPITestCase(BaseTestCase):
lambda x: False)
def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3521,7 +4272,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
+ # Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3547,7 +4298,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_hard_reboot_of_soft_rebooting_instance(self):
- """Ensure instance can be hard rebooted while soft rebooting"""
+ # Ensure instance can be hard rebooted while soft rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3565,7 +4316,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_soft_reboot_of_rebooting_instance(self):
- """Ensure instance can't be soft rebooted while rebooting"""
+ # Ensure instance can't be soft rebooted while rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3583,7 +4334,7 @@ class ComputeAPITestCase(BaseTestCase):
reboot_type)
def test_hostname_create(self):
- """Ensure instance hostname is set during creation."""
+ # Ensure instance hostname is set during creation.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
@@ -3593,7 +4344,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3646,7 +4397,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_snapshot(self):
- """Ensure a snapshot of an instance can be created"""
+ # Ensure a snapshot of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
@@ -3660,6 +4411,31 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_snapshot_given_image_uuid(self):
+ """Ensure a snapshot of an instance can be created when image UUID
+ is already known.
+ """
+ instance = self._create_fake_instance()
+ name = 'snap1'
+ extra_properties = {'extra_param': 'value1'}
+ recv_meta = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties)
+ image_id = recv_meta['id']
+
+ def fake_show(meh, context, id):
+ return recv_meta
+
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None})
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ image = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties,
+ image_id=image_id)
+ self.assertEqual(image, recv_meta)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_snapshot_minram_mindisk_VHD(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -3667,30 +4443,25 @@ class ComputeAPITestCase(BaseTestCase):
and min_disk set to that of the original instances flavor.
"""
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['disk_format'] = 'vhd'
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image.update(disk_format='vhd',
+ min_ram=1, min_disk=1)
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- instance = self._create_fake_instance()
- inst_params = {'root_gb': 2, 'memory_mb': 256}
- instance['instance_type'].update(inst_params)
+ instance = self._create_fake_instance(type_name='m1.small')
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
- self.assertEqual(image['min_ram'], 256)
- self.assertEqual(image['min_disk'], 2)
+ instance_type = instance['instance_type']
+ self.assertEqual(image['min_ram'], instance_type['memory_mb'])
+ self.assertEqual(image['min_disk'], instance_type['root_gb'])
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
- db.instance_destroy(self.context, instance['uuid'])
-
def test_snapshot_minram_mindisk(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -3699,13 +4470,10 @@ class ComputeAPITestCase(BaseTestCase):
image had a disk format of vhd.
"""
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['disk_format'] = 'raw'
- img['min_ram'] = 512
- img['min_disk'] = 1
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['disk_format'] = 'raw'
+ self.fake_image['min_ram'] = 512
+ self.fake_image['min_disk'] = 1
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
instance = self._create_fake_instance()
@@ -3729,12 +4497,9 @@ class ComputeAPITestCase(BaseTestCase):
Do not show an attribute that the orig img did not have.
"""
- def fake_show(*args):
- img = copy.copy(self.fake_image)
- img['disk_format'] = 'raw'
- img['min_disk'] = 1
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.fake_image['disk_format'] = 'raw'
+ self.fake_image['min_disk'] = 1
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
instance = self._create_fake_instance()
@@ -3760,9 +4525,12 @@ class ComputeAPITestCase(BaseTestCase):
"""
def fake_show(*args):
- raise exception.ImageNotFound
+ raise exception.ImageNotFound(image_id="fake")
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ if not self.__class__.__name__ == "CellsComputeAPITestCase":
+ # Cells tests will call this a 2nd time in child cell with
+ # the newly created image_id, and we want that one to succeed.
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
@@ -3805,7 +4573,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertFalse('spam' in properties)
def test_backup(self):
- """Can't backup an instance which is already being backed up."""
+ # Ensure a backup of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.backup(self.context, instance,
'backup1', 'DAILY', None,
@@ -3821,7 +4589,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_backup_conflict(self):
- """Can't backup an instance which is already being backed up."""
+ # Can't backup an instance which is already being backed up.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_BACKUP}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -3838,7 +4606,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_conflict(self):
- """Can't snapshot an instance which is already being snapshotted."""
+ # Can't snapshot an instance which is already being snapshotted.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -3897,7 +4665,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_invalid_flavor_fails(self):
- """Ensure invalid flavors raise"""
+ # Ensure invalid flavors raise.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -3923,12 +4691,12 @@ class ComputeAPITestCase(BaseTestCase):
flavorid, 0, 1.0, True)
instance_types.destroy(name)
self.assertRaises(exception.FlavorNotFound, self.compute_api.resize,
- self.context, instance, 200)
+ self.context, instance, flavorid)
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_same_flavor_fails(self):
- """Ensure invalid flavors raise"""
+ # Ensure resizing to the same flavor fails.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -3940,6 +4708,59 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
+ def test_resize_quota_exceeds_fails(self):
+ instance = self._create_fake_instance()
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ instance = jsonutils.to_primitive(instance)
+ self.compute.run_instance(self.context, instance=instance)
+
+ name = 'test_resize_with_big_mem'
+ flavorid = 11
+ memory_mb = 102400
+ root_gb = 0
+ vcpus = 1
+ instance_types.create(name, memory_mb, vcpus, root_gb, 0,
+ flavorid, 0, 1.0, True)
+ self.assertRaises(exception.TooManyInstances, self.compute_api.resize,
+ self.context, instance, flavorid)
+
+ instance_types.destroy(name)
+ self.compute.terminate_instance(self.context, instance=instance)
+
+ def test_resize_revert_deleted_flavor_fails(self):
+ orig_name = 'test_resize_revert_orig_flavor'
+ orig_flavorid = 11
+ memory_mb = 128
+ root_gb = 0
+ vcpus = 1
+ instance_types.create(orig_name, memory_mb, vcpus, root_gb, 0,
+ orig_flavorid, 0, 1.0, True)
+
+ instance = self._create_fake_instance(type_name=orig_name)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ instance = jsonutils.to_primitive(instance)
+ self.compute.run_instance(self.context, instance=instance)
+
+ old_instance_type_id = instance['instance_type_id']
+ new_flavor = instance_types.get_instance_type_by_name('m1.tiny')
+ new_flavorid = new_flavor['flavorid']
+ new_instance_type_id = new_flavor['id']
+ self.compute_api.resize(self.context, instance, new_flavorid)
+
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type_id,
+ 'new_instance_type_id': new_instance_type_id,
+ 'status': 'finished'})
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None,
+ 'vm_state': vm_states.RESIZED})
+ instance_types.destroy(orig_name)
+ self.assertRaises(exception.InstanceTypeNotFound,
+ self.compute_api.revert_resize,
+ self.context, instance)
+ self.compute.terminate_instance(self.context, instance=instance)
+
def test_migrate(self):
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
@@ -3973,10 +4794,11 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance(dict(host='host2'))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
+ instance['instance_type']['extra_specs'] = []
orig_instance_type = instance['instance_type']
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
- # the compute manager is using a cached copy of FLAGS.host,
+ # the compute manager is using a cached copy of CONF.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
@@ -4011,7 +4833,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
- # the compute manager is using a cached copy of FLAGS.host,
+ # the compute manager is using a cached copy of CONF.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
@@ -4025,7 +4847,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_get(self):
- """Test get instance"""
+ # Test get instance.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4039,7 +4861,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_admin_context(self):
- """Test get instance"""
+ # Test get instance.
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
@@ -4054,7 +4876,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_integer_id(self):
- """Test get instance with an integer id"""
+ # Test get instance with an integer id.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4068,7 +4890,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_all_by_name_regexp(self):
- """Test searching instances by name (display_name)"""
+ # Test searching instances by name (display_name).
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
@@ -4111,7 +4933,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_multiple_options_at_once(self):
- """Test searching by multiple options at once"""
+ # Test searching by multiple options at once.
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager()
self.stubs.Set(self.compute_api.network_api,
@@ -4120,7 +4942,7 @@ class ComputeAPITestCase(BaseTestCase):
instance1 = self._create_fake_instance({
'display_name': 'woot',
- 'id': 0,
+ 'id': 1,
'uuid': '00000000-0000-0000-0000-000000000010'})
instance2 = self._create_fake_instance({
'display_name': 'woo',
@@ -4165,7 +4987,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
- """Test searching instances by image"""
+ # Test searching instances by image.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
@@ -4195,7 +5017,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
- """Test searching instances by image"""
+ # Test searching instances by flavor.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
@@ -4215,9 +5037,10 @@ class ComputeAPITestCase(BaseTestCase):
search_opts={'flavor': 5})
self.assertEqual(len(instances), 0)
- # ensure unknown filter maps to an empty list, not an exception
- instances = self.compute_api.get_all(c, search_opts={'flavor': 99})
- self.assertEqual(instances, [])
+ # ensure unknown filter maps to an exception
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.get_all, c,
+ search_opts={'flavor': 99})
instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
self.assertEqual(len(instances), 1)
@@ -4234,7 +5057,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
- """Test searching instances by state"""
+ # Test searching instances by state.
c = context.get_admin_context()
instance1 = self._create_fake_instance({
@@ -4274,7 +5097,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
- """Test searching instances by metadata"""
+ # Test searching instances by metadata.
c = context.get_admin_context()
instance0 = self._create_fake_instance()
@@ -4396,7 +5219,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(_context, instance['uuid'])
def test_get_instance_faults(self):
- """Get an instances latest fault"""
+ # Get an instances latest fault.
instance = self._create_fake_instance()
fault_fixture = {
@@ -4503,7 +5326,7 @@ class ComputeAPITestCase(BaseTestCase):
]
bdms.sort()
expected_result.sort()
- self.assertDictListMatch(bdms, expected_result)
+ self.assertThat(bdms, matchers.DictListMatches(expected_result))
self.compute_api._update_block_device_mapping(
self.context, instance_types.get_default_instance_type(),
@@ -4539,7 +5362,7 @@ class ComputeAPITestCase(BaseTestCase):
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort()
expected_result.sort()
- self.assertDictListMatch(bdms, expected_result)
+ self.assertThat(bdms, matchers.DictListMatches(expected_result))
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
@@ -4590,13 +5413,13 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_architecture(self):
- """Test the instance architecture"""
+ # Test the instance architecture.
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
- """Test if the architecture is unknown."""
+ # Test if the architecture is unknown.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
@@ -4608,7 +5431,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
- """Test the instance_name template"""
+ # Test the instance_name template.
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
@@ -4632,7 +5455,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
- instance = self._create_fake_instance(params={'host': FLAGS.host})
+ instance = self._create_fake_instance(params={'host': CONF.host})
self.compute_api.add_fixed_ip(self.context, instance, '1')
self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1')
self.compute_api.delete(self.context, instance)
@@ -4646,7 +5469,7 @@ class ComputeAPITestCase(BaseTestCase):
'/invalid')
def test_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -4671,7 +5494,7 @@ class ComputeAPITestCase(BaseTestCase):
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
- rpc.call(self.context, FLAGS.consoleauth_topic,
+ rpc.call(self.context, CONF.consoleauth_topic,
rpc_msg2, None).AndReturn(None)
self.mox.ReplayAll()
@@ -4689,6 +5512,23 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_get_backdoor_port(self):
+ # Test api call to get backdoor_port.
+ fake_backdoor_port = 59697
+
+ self.mox.StubOutWithMock(rpc, 'call')
+
+ rpc_msg = {'method': 'get_backdoor_port',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
+ rpc.call(self.context, 'compute.fake_host', rpc_msg,
+ None).AndReturn(fake_backdoor_port)
+
+ self.mox.ReplayAll()
+
+ port = self.compute_api.get_backdoor_port(self.context, 'fake_host')
+ self.assertEqual(port, fake_backdoor_port)
+
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -4711,7 +5551,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(output, fake_console_output)
def test_attach_volume(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure a volume can be attached to an instance.
called = {}
@@ -4767,7 +5607,7 @@ class ComputeAPITestCase(BaseTestCase):
fake_rpc_attach_volume)
def test_terminate_with_volumes(self):
- """Make sure that volumes get detached during instance termination"""
+ # Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
instance = self._create_fake_instance()
@@ -4800,7 +5640,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertTrue(result["detached"])
def test_inject_network_info(self):
- instance = self._create_fake_instance(params={'host': FLAGS.host})
+ instance = self._create_fake_instance(params={'host': CONF.host})
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
@@ -4851,7 +5691,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.delete(self.context, instance)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
instance = self._create_fake_instance()
self.compute_api.inject_file(self.context, instance,
"/tmp/test", "File Contents")
@@ -4875,7 +5715,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4904,7 +5744,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4945,7 +5785,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4966,7 +5806,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4995,7 +5835,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
- host='fake_dest_host')
+ host_name='fake_dest_host')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
@@ -5016,8 +5856,7 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': avail_zone})
+ 'report_count': 0})
return values
@@ -5032,15 +5871,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.stubs.Set(rpc, 'cast', fake_rpc_method)
- def test_create_invalid_availability_zone(self):
- """Ensure InvalidAggregateAction is raised with wrong avail_zone."""
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.create_aggregate,
- self.context, 'fake_aggr', 'fake_avail_zone')
-
def test_update_aggregate_metadata(self):
- """Ensure metadata can be updated"""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
+ # Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
@@ -5050,11 +5882,12 @@ class ComputeAPIAggrTestCase(BaseTestCase):
metadata['foo_key1'] = None
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
- self.assertDictMatch(expected['metadata'], {'foo_key2': 'foo_value2'})
+ self.assertThat(expected['metadata'],
+ matchers.DictMatches({'availability_zone': 'fake_zone',
+ 'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
- """Ensure we can delete an aggregate."""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
+ # Ensure we can delete an aggregate.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.api.delete_aggregate(self.context, aggr['id'])
@@ -5064,7 +5897,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
- """Ensure InvalidAggregateAction is raised when non empty aggregate."""
+ # Ensure InvalidAggregateAction is raised when non empty aggregate.
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
@@ -5074,7 +5907,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
- """Ensure we can add a host to an aggregate."""
+ # Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
@@ -5085,7 +5918,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggregate_multiple(self):
- """Ensure we can add multiple hosts to an aggregate."""
+ # Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5095,19 +5928,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
- def test_add_host_to_aggregate_zones_mismatch(self):
- """Ensure InvalidAggregateAction is raised when zones don't match."""
- _create_service_entries(self.context, {'fake_zoneX': ['fake_host1'],
- 'fake_zoneY': ['fake_host2']})
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', 'fake_zoneY')
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.add_host_to_aggregate,
- self.context, aggr['id'], 'fake_host1')
-
def test_add_host_to_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when adding invalid host."""
- _create_service_entries(self.context, {'fake_zone': ['fake_host']})
+ # Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
@@ -5115,7 +5937,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.context, aggr['id'], 'invalid_host')
def test_remove_host_from_aggregate_active(self):
- """Ensure we can remove a host from an aggregate."""
+ # Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5129,7 +5951,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when removing invalid host."""
+ # Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
@@ -5138,6 +5960,19 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.context, aggr['id'], 'invalid_host')
+class ComputeBackdoorPortTestCase(BaseTestCase):
+ """This is for unit test coverage of backdoor port rpc."""
+
+ def setUp(self):
+ super(ComputeBackdoorPortTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.compute.backdoor_port = 59697
+
+ def test_get_backdoor_port(self):
+ port = self.compute.get_backdoor_port(self.context)
+ self.assertEqual(port, self.compute.backdoor_port)
+
+
class ComputeAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.manager."""
@@ -5145,9 +5980,9 @@ class ComputeAggrTestCase(BaseTestCase):
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
- values = {'name': 'test_aggr',
- 'availability_zone': 'test_zone'}
- self.aggr = db.aggregate_create(self.context, values)
+ values = {'name': 'test_aggr'}
+ az = {'availability_zone': 'test_zone'}
+ self.aggr = db.aggregate_create(self.context, values, metadata=az)
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
@@ -5176,7 +6011,7 @@ class ComputeAggrTestCase(BaseTestCase):
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate['id'], self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr['id'])
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
@@ -5190,7 +6025,7 @@ class ComputeAggrTestCase(BaseTestCase):
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate['id'], self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr['id'])
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
@@ -5206,20 +6041,9 @@ class ComputePolicyTestCase(BaseTestCase):
def setUp(self):
super(ComputePolicyTestCase, self).setUp()
- nova.policy.reset()
- nova.policy.init()
self.compute_api = compute.API()
- def tearDown(self):
- super(ComputePolicyTestCase, self).tearDown()
- nova.policy.reset()
-
- def _set_rules(self, rules):
- common_policy.set_rules(common_policy.Rules(
- dict((k, common_policy.parse_rule(v))
- for k, v in rules.items())))
-
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
@@ -5231,20 +6055,20 @@ class ComputePolicyTestCase(BaseTestCase):
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.delete, self.context, instance)
# reset rules to allow deletion
rules = {"compute:delete": []}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.compute_api.delete(self.context, instance)
def test_create_fail(self):
rules = {"compute:create": [["false:false"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1')
@@ -5255,7 +6079,7 @@ class ComputePolicyTestCase(BaseTestCase):
"compute:create:attach_network": [["false:false"]],
"compute:create:attach_volume": [],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
@@ -5268,7 +6092,7 @@ class ComputePolicyTestCase(BaseTestCase):
"compute:create:attach_network": [],
"compute:create:attach_volume": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
@@ -5281,7 +6105,7 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance['uuid'])
@@ -5290,7 +6114,7 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get_all": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
@@ -5303,16 +6127,16 @@ class ComputePolicyTestCase(BaseTestCase):
rules = {
"compute:get_instance_faults": [["false:false"]],
}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
- self.context, instances)
+ context.get_admin_context(), instances)
def test_force_host_fail(self):
rules = {"compute:create": [],
"compute:create:forced_host": [["role:fake"]]}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, '1',
@@ -5321,76 +6145,12 @@ class ComputePolicyTestCase(BaseTestCase):
def test_force_host_pass(self):
rules = {"compute:create": [],
"compute:create:forced_host": []}
- self._set_rules(rules)
+ self.policy.set_rules(rules)
self.compute_api.create(self.context, None, '1',
availability_zone='1:1')
-class ComputeHostAPITestCase(BaseTestCase):
- def setUp(self):
- super(ComputeHostAPITestCase, self).setUp()
- self.host_api = compute_api.HostAPI()
-
- def _rpc_call_stub(self, call_info):
- def fake_rpc_call(context, topic, msg, timeout=None):
- call_info['context'] = context
- call_info['topic'] = topic
- call_info['msg'] = msg
- self.stubs.Set(rpc, 'call', fake_rpc_call)
-
- def test_set_host_enabled(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'set_host_enabled',
- 'args': {'enabled': 'fake_enabled'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_get_host_uptime(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
-
- self.host_api.get_host_uptime(ctxt, 'fake_host')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'get_host_uptime',
- 'args': {},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_host_power_action(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
- self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_power_action',
- 'args': {'action': 'fake_action'},
- 'version':
- compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
- def test_set_host_maintenance(self):
- ctxt = context.RequestContext('fake', 'fake')
- call_info = {}
- self._rpc_call_stub(call_info)
- self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
- self.assertEqual(call_info['context'], ctxt)
- self.assertEqual(call_info['topic'], 'compute.fake_host')
- self.assertEqual(call_info['msg'],
- {'method': 'host_maintenance_mode',
- 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
-
-
class KeypairAPITestCase(BaseTestCase):
def setUp(self):
super(KeypairAPITestCase, self).setUp()
@@ -5459,7 +6219,7 @@ class KeypairAPITestCase(BaseTestCase):
def test_create_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
- return FLAGS.quota_key_pairs
+ return CONF.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.create_key_pair,
@@ -5493,7 +6253,7 @@ class KeypairAPITestCase(BaseTestCase):
def test_import_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
- return FLAGS.quota_key_pairs
+ return CONF.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair,
@@ -5536,7 +6296,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
def test_can_rebuild_instance_from_visible_instance_type(self):
instance = self._create_fake_instance()
- image_href = None
+ image_href = 'fake-image-id'
admin_password = 'blah'
instance['instance_type']['disabled'] = True
@@ -5552,7 +6312,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
when the slice is on disabled type already.
"""
instance = self._create_fake_instance()
- image_href = None
+ image_href = 'fake-image-id'
admin_password = 'blah'
instance['instance_type']['disabled'] = True
@@ -5628,7 +6388,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
class ComputeReschedulingTestCase(BaseTestCase):
- """Tests re-scheduling logic for new build requests"""
+ """Tests re-scheduling logic for new build requests."""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
@@ -5639,7 +6399,8 @@ class ComputeReschedulingTestCase(BaseTestCase):
self.updated_task_state = kwargs.get('task_state')
self.stubs.Set(self.compute, '_instance_update', fake_update)
- def _reschedule(self, request_spec=None, filter_properties=None):
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
if not filter_properties:
filter_properties = {}
@@ -5655,19 +6416,19 @@ class ComputeReschedulingTestCase(BaseTestCase):
requested_networks, is_first_time, filter_properties)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, self.expected_task_state)
+ method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
- """no filter_properties will disable re-scheduling"""
+ # no filter_properties will disable re-scheduling.
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
- """no retry info will also disable re-scheduling"""
+ # no retry info will also disable re-scheduling.
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
- """no request spec will also disable re-scheduling"""
+ # no request spec will also disable re-scheduling.
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
@@ -5676,20 +6437,28 @@ class ComputeReschedulingTestCase(BaseTestCase):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = {'instance_uuids': ['foo', 'bar']}
+ try:
+ raise test.TestingException("just need an exception")
+ except test.TestingException:
+ exc_info = sys.exc_info()
+ exc_str = traceback.format_exception(*exc_info)
+
self.assertTrue(self._reschedule(filter_properties=filter_properties,
- request_spec=request_spec))
+ request_spec=request_spec, exc_info=exc_info))
self.assertEqual(1, len(request_spec['instance_uuids']))
self.assertEqual(self.updated_task_state, self.expected_task_state)
+ self.assertEqual(exc_str, filter_properties['retry']['exc'])
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
- """Test re-scheduling logic for prep_resize requests"""
+ """Test re-scheduling logic for prep_resize requests."""
def setUp(self):
super(ComputeReschedulingResizeTestCase, self).setUp()
self.expected_task_state = task_states.RESIZE_PREP
- def _reschedule(self, request_spec=None, filter_properties=None):
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
if not filter_properties:
filter_properties = {}
@@ -5706,7 +6475,7 @@ class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, self.expected_task_state)
+ method_args, self.expected_task_state, exc_info=exc_info)
class InnerTestingException(Exception):
@@ -5732,11 +6501,11 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.compute._spawn(mox.IgnoreArg(), self.instance, None, None, None,
False, None).AndRaise(test.TestingException("BuildError"))
self.compute._reschedule_or_reraise(mox.IgnoreArg(), self.instance,
- None, None, None, False, None, {})
+ mox.IgnoreArg(), None, None, None, False, None, {})
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
- False, self.instance)
+ False, None, self.instance)
def test_deallocate_network_fail(self):
"""Test de-allocation of network failing before re-scheduling logic
@@ -5750,6 +6519,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
@@ -5760,10 +6531,10 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# error:
self.assertRaises(InnerTestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_fail(self):
- """Test handling of exception from _reschedule"""
+ # Test handling of exception from _reschedule.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -5782,36 +6553,40 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
raise test.TestingException("Original")
except Exception:
# not re-scheduling, should raise the original build error:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_false(self):
- """Test not-rescheduling, but no nested exception"""
+ # Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
self.mox.StubOutWithMock(self.compute, '_reschedule')
- self.compute._deallocate_network(self.context,
- self.instance)
- self.compute._reschedule(self.context, None, instance_uuid,
- {}, self.compute.scheduler_rpcapi.run_instance, method_args,
- task_states.SCHEDULING).AndReturn(False)
-
- self.mox.ReplayAll()
-
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
+ self.compute._deallocate_network(self.context,
+ self.instance)
+ self.compute._reschedule(self.context, None, {}, instance_uuid,
+ self.compute.scheduler_rpcapi.run_instance, method_args,
+ task_states.SCHEDULING, exc_info).AndReturn(False)
+
+ self.mox.ReplayAll()
+
# re-scheduling is False, the original build error should be
# raised here:
self.assertRaises(test.TestingException,
self.compute._reschedule_or_reraise, self.context,
- self.instance, None, None, None, False, None, {})
+ self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_true(self):
- """Test behavior when re-scheduling happens"""
+ # Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -5822,11 +6597,14 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ instance_uuid, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
- self.compute._reschedule(self.context, None, instance_uuid,
- {}, self.compute.scheduler_rpcapi.run_instance,
- method_args, task_states.SCHEDULING).AndReturn(True)
+ self.compute._reschedule(self.context, None, {}, instance_uuid,
+ self.compute.scheduler_rpcapi.run_instance,
+ method_args, task_states.SCHEDULING, exc_info).AndReturn(
+ True)
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
@@ -5834,7 +6612,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
# re-scheduling is True, original error is logged, but nothing
# is raised:
self.compute._reschedule_or_reraise(self.context, self.instance,
- None, None, None, False, None, {})
+ exc_info, None, None, None, False, None, {})
class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
@@ -5859,7 +6637,8 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
- self.instance, self.instance_type, None, None, None)
+ self.instance, mox.IgnoreArg(), self.instance_type, None, None,
+ None)
self.mox.ReplayAll()
@@ -5883,9 +6662,11 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- None, self.instance, self.instance_type, None, {}, {})
+ None, self.instance, exc_info, self.instance_type, None,
+ {}, {})
def test_reschedule_false(self):
"""Original exception should be raised if the resize is not
@@ -5903,12 +6684,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
try:
raise test.TestingException("Original")
except Exception:
+ exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- None, self.instance, self.instance_type, None, {}, {})
+ None, self.instance, exc_info, self.instance_type, None,
+ {}, {})
def test_reschedule_true(self):
- """If rescheduled, the original resize exception should be logged"""
+ # If rescheduled, the original resize exception should be logged.
method_args = (self.instance, self.instance_type, None, {}, {}, None)
try:
raise test.TestingException("Original")
@@ -5920,13 +6703,13 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.compute._reschedule(self.context, {}, {},
self.instance_uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
- task_states.RESIZE_PREP).AndReturn(True)
+ task_states.RESIZE_PREP, exc_info).AndReturn(True)
self.compute._log_original_error(exc_info, self.instance_uuid)
self.mox.ReplayAll()
self.compute._reschedule_resize_or_reraise(self.context, None,
- self.instance, self.instance_type, None, {}, {})
+ self.instance, exc_info, self.instance_type, None, {}, {})
class ComputeInactiveImageTestCase(BaseTestCase):
@@ -5946,8 +6729,8 @@ class ComputeInactiveImageTestCase(BaseTestCase):
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
- """Make sure we can't start an instance with a deleted image."""
+ # Make sure we can't start an instance with a deleted image.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
- self.context, inst_type, None)
+ self.context, inst_type, 'fake-image-uuid')
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
new file mode 100644
index 000000000..3c25f9b43
--- /dev/null
+++ b/nova/tests/compute/test_compute_cells.py
@@ -0,0 +1,179 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute w/ Cells
+"""
+import functools
+
+from nova.compute import cells_api as compute_cells_api
+from nova import db
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.tests.compute import test_compute
+
+
+LOG = logging.getLogger('nova.tests.test_compute_cells')
+
+ORIG_COMPUTE_API = None
+
+
+def stub_call_to_cells(context, instance, method, *args, **kwargs):
+ fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
+ return fn(context, instance, *args, **kwargs)
+
+
+def stub_cast_to_cells(context, instance, method, *args, **kwargs):
+ fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+ fn(context, instance, *args, **kwargs)
+
+
+def deploy_stubs(stubs, api, original_instance=None):
+ call = stub_call_to_cells
+ cast = stub_cast_to_cells
+
+ if original_instance:
+ kwargs = dict(original_instance=original_instance)
+ call = functools.partial(stub_call_to_cells, **kwargs)
+ cast = functools.partial(stub_cast_to_cells, **kwargs)
+
+ stubs.Set(api, '_call_to_cells', call)
+ stubs.Set(api, '_cast_to_cells', cast)
+
+
+def wrap_create_instance(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ instance = self._create_fake_instance()
+
+ def fake(*args, **kwargs):
+ return instance
+
+ self.stubs.Set(self, '_create_fake_instance', fake)
+ original_instance = jsonutils.to_primitive(instance)
+ deploy_stubs(self.stubs, self.compute_api,
+ original_instance=original_instance)
+ return func(self, *args, **kwargs)
+
+ return wrapper
+
+
+class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
+ def setUp(self):
+ super(CellsComputeAPITestCase, self).setUp()
+ global ORIG_COMPUTE_API
+ ORIG_COMPUTE_API = self.compute_api
+
+ def _fake_cell_read_only(*args, **kwargs):
+ return False
+
+ def _fake_validate_cell(*args, **kwargs):
+ return
+
+ def _nop_update(context, instance, **kwargs):
+ return instance
+
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.stubs.Set(self.compute_api, '_cell_read_only',
+ _fake_cell_read_only)
+ self.stubs.Set(self.compute_api, '_validate_cell',
+ _fake_validate_cell)
+
+ # NOTE(belliott) Don't update the instance state
+ # for the tests at the API layer. Let it happen after
+ # the stub cast to cells so that expected_task_states
+ # match.
+ self.stubs.Set(self.compute_api, 'update', _nop_update)
+
+ deploy_stubs(self.stubs, self.compute_api)
+
+ def tearDown(self):
+ global ORIG_COMPUTE_API
+ self.compute_api = ORIG_COMPUTE_API
+ super(CellsComputeAPITestCase, self).tearDown()
+
+ def test_instance_metadata(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_live_migrate(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_get_backdoor_port(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_snapshot_given_image_uuid(self):
+ self.skipTest("Test doesn't apply to API cell.")
+
+ @wrap_create_instance
+ def test_snapshot(self):
+ return super(CellsComputeAPITestCase, self).test_snapshot()
+
+ @wrap_create_instance
+ def test_snapshot_image_metadata_inheritance(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_image_metadata_inheritance()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_VHD(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_VHD()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_img_missing_minram(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_img_missing_minram()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_no_image(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_no_image()
+
+ @wrap_create_instance
+ def test_backup(self):
+ return super(CellsComputeAPITestCase, self).test_backup()
+
+
+class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
+ def setUp(self):
+ super(CellsComputePolicyTestCase, self).setUp()
+ global ORIG_COMPUTE_API
+ ORIG_COMPUTE_API = self.compute_api
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ deploy_stubs(self.stubs, self.compute_api)
+
+ def tearDown(self):
+ global ORIG_COMPUTE_API
+ self.compute_api = ORIG_COMPUTE_API
+ super(CellsComputePolicyTestCase, self).tearDown()
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 056450708..f29c68627 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -24,8 +24,9 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.image import glance
from nova.network import api as network_api
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
@@ -33,11 +34,11 @@ from nova.openstack.common.notifier import test_notifier
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
-from nova import utils
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeValidateDeviceTestCase(test.TestCase):
@@ -214,10 +215,13 @@ class UsageInfoTestCase(test.TestCase):
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
+ notifier_api._reset_drivers()
+ self.addCleanup(notifier_api._reset_drivers)
+ self.flags(use_local=True, group='conductor')
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -230,12 +234,8 @@ class UsageInfoTestCase(test.TestCase):
'show', fake_show)
fake_network.set_stub_network_methods(self.stubs)
- def tearDown(self):
- notifier_api._reset_drivers()
- super(UsageInfoTestCase, self).tearDown()
-
def _create_instance(self, params={}):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
@@ -251,7 +251,7 @@ class UsageInfoTestCase(test.TestCase):
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -260,6 +260,7 @@ class UsageInfoTestCase(test.TestCase):
'other_data': 'meow'}
db.instance_system_metadata_update(self.context, instance['uuid'],
sys_metadata, False)
+ instance = db.instance_get(self.context, instance_id)
compute_utils.notify_usage_exists(self.context, instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
@@ -268,7 +269,7 @@ class UsageInfoTestCase(test.TestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance.uuid)
+ self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
@@ -280,12 +281,12 @@ class UsageInfoTestCase(test.TestCase):
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
def test_notify_usage_exists_deleted_instance(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -304,7 +305,7 @@ class UsageInfoTestCase(test.TestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance.uuid)
+ self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
@@ -316,11 +317,11 @@ class UsageInfoTestCase(test.TestCase):
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
self.compute.terminate_instance(self.context, instance)
@@ -331,7 +332,7 @@ class UsageInfoTestCase(test.TestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance.uuid)
+ self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
@@ -342,7 +343,7 @@ class UsageInfoTestCase(test.TestCase):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEquals(payload['image_meta'], {})
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_about_instance_usage(self):
@@ -364,7 +365,7 @@ class UsageInfoTestCase(test.TestCase):
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['instance_id'], instance.uuid)
+ self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
@@ -375,6 +376,17 @@ class UsageInfoTestCase(test.TestCase):
self.assertEquals(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
self.assertEquals(payload['image_name'], 'fake_name')
- image_ref_url = "%s/images/1" % utils.generate_glance_url()
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance)
+
+
+class MetadataToDictTestCase(test.TestCase):
+ def test_metadata_to_dict(self):
+ self.assertEqual(compute_utils.metadata_to_dict(
+ [{'key': 'foo1', 'value': 'bar'},
+ {'key': 'foo2', 'value': 'baz'}]),
+ {'foo1': 'bar', 'foo2': 'baz'})
+
+ def test_metadata_to_dict_empty(self):
+ self.assertEqual(compute_utils.metadata_to_dict([]), {})
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
new file mode 100644
index 000000000..95d3c4926
--- /dev/null
+++ b/nova/tests/compute/test_host_api.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import compute
+from nova.compute import rpcapi as compute_rpcapi
+from nova import context
+from nova.openstack.common import rpc
+from nova import test
+
+
+class ComputeHostAPITestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeHostAPITestCase, self).setUp()
+ self.host_api = compute.HostAPI()
+ self.ctxt = context.get_admin_context()
+
+ def _mock_rpc_call(self, expected_message, result=None):
+ if result is None:
+ result = 'fake-result'
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(self.ctxt, 'compute.fake_host',
+ expected_message, None).AndReturn(result)
+
+ def _mock_assert_host_exists(self):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists.
+ """
+ self.mox.StubOutWithMock(self.host_api, '_assert_host_exists')
+ self.host_api._assert_host_exists(self.ctxt, 'fake_host')
+
+ def test_set_host_enabled(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'set_host_enabled',
+ 'args': {'enabled': 'fake_enabled'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+
+ self.mox.ReplayAll()
+ result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
+ 'fake_enabled')
+ self.assertEqual('fake-result', result)
+
+ def test_get_host_uptime(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'get_host_uptime',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
+ self.assertEqual('fake-result', result)
+
+ def test_host_power_action(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_power_action',
+ 'args': {'action': 'fake_action'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.host_power_action(self.ctxt, 'fake_host',
+ 'fake_action')
+ self.assertEqual('fake-result', result)
+
+ def test_set_host_maintenance(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call(
+ {'method': 'host_maintenance_mode',
+ 'args': {'host': 'fake_host', 'mode': 'fake_mode'},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
+ self.mox.ReplayAll()
+ result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
+ 'fake_mode')
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt)
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={})
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
+ services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(key1='val2'))
+ self.mox.VerifyAll()
+ self.assertEqual([exp_services[1]], result)
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
new file mode 100644
index 000000000..78ed0cea7
--- /dev/null
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -0,0 +1,99 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service with multiple compute nodes."""
+
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova import test
+from nova.virt import fake
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class BaseTestCase(test.TestCase):
+ def tearDown(self):
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+
+class FakeDriverSingleNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverSingleNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['xyz'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, dict))
+ self.assertEqual(stats['hypervisor_hostname'], 'xyz')
+
+ def test_get_available_resource(self):
+ res = self.driver.get_available_resource('xyz')
+ self.assertEqual(res['hypervisor_hostname'], 'xyz')
+
+
+class FakeDriverMultiNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverMultiNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['aaa', 'bbb'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 2)
+ self.assertEqual(stats[0]['hypervisor_hostname'], 'aaa')
+ self.assertEqual(stats[1]['hypervisor_hostname'], 'bbb')
+
+ def test_get_available_resource(self):
+ res_a = self.driver.get_available_resource('aaa')
+ self.assertEqual(res_a['hypervisor_hostname'], 'aaa')
+
+ res_b = self.driver.get_available_resource('bbb')
+ self.assertEqual(res_b['hypervisor_hostname'], 'bbb')
+
+ self.assertRaises(exception.NovaException,
+ self.driver.get_available_resource, 'xxx')
+
+
+class MultiNodeComputeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MultiNodeComputeTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+ self.compute = importutils.import_object(CONF.compute_manager)
+
+ def test_update_available_resource_add_remove_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ fake.set_nodes(['A', 'B'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 64cdb8d53..53d92a13f 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute resource tracking"""
+"""Tests for compute resource tracking."""
import uuid
@@ -24,7 +24,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
-from nova import exception
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
@@ -36,17 +36,25 @@ LOG = logging.getLogger(__name__)
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_LOCAL_GB = 6
FAKE_VIRT_VCPUS = 1
+CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
- """Pretend version of a lame virt driver"""
+ """Pretend version of a lame virt driver."""
+
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
- def get_available_resource(self):
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtDriver(driver.ComputeDriver):
@@ -59,7 +67,10 @@ class FakeVirtDriver(driver.ComputeDriver):
self.memory_mb_used = 0
self.local_gb_used = 0
- def get_available_resource(self):
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
@@ -74,6 +85,9 @@ class FakeVirtDriver(driver.ComputeDriver):
}
return d
+ def legacy_nwinfo(self):
+ return True
+
class BaseTestCase(test.TestCase):
@@ -83,13 +97,25 @@ class BaseTestCase(test.TestCase):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
- self.context = context.RequestContext('fake', 'fake')
+ self.context = context.get_admin_context()
+
+ self.flags(use_local=True, group='conductor')
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
self._instances = {}
- self.stubs.Set(db, 'instance_get_all_by_host',
- lambda c, h: self._instances.values())
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self._instance_types = {}
+
+ self.stubs.Set(self.conductor.db,
+ 'instance_get_all_by_host_and_node',
+ self._fake_instance_get_all_by_host_and_node)
+ self.stubs.Set(self.conductor.db,
+ 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
+ self.stubs.Set(self.conductor.db,
+ 'instance_type_get', self._fake_instance_type_get)
+
+ self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
@@ -106,7 +132,8 @@ class BaseTestCase(test.TestCase):
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
- "stats": [{"key": "num_instances", "value": "1"}]
+ "stats": [{"key": "num_instances", "value": "1"}],
+ "hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
@@ -130,7 +157,7 @@ class BaseTestCase(test.TestCase):
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
- 'vm_state': vm_states.BUILDING,
+ 'vm_state': vm_states.RESIZED,
'task_state': None,
'memory_mb': 2,
'root_gb': 3,
@@ -139,12 +166,37 @@ class BaseTestCase(test.TestCase):
'project_id': '123456',
'vcpus': 1,
'host': None,
+ 'node': None,
+ 'instance_type_id': 1,
+ 'launched_on': None,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
+ def _fake_instance_type_create(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'vcpus': FAKE_VIRT_VCPUS,
+ 'root_gb': FAKE_VIRT_LOCAL_GB / 2,
+ 'ephemeral_gb': FAKE_VIRT_LOCAL_GB / 2,
+ 'flavorid': 'fakeflavor'
+ }
+ instance_type.update(**kwargs)
+
+ id_ = instance_type['id']
+ self._instance_types[id_] = instance_type
+ return instance_type
+
+ def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
+ return [i for i in self._instances.values() if i['host'] == host]
+
+ def _fake_instance_type_get(self, ctxt, id_):
+ return self._instance_types[id_]
+
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
@@ -153,15 +205,19 @@ class BaseTestCase(test.TestCase):
# only used in the subsequent notification:
return (instance, instance)
- def _tracker(self, unsupported=False):
- host = "fakehost"
+ def _driver(self):
+ return FakeVirtDriver()
+
+ def _tracker(self, host=None):
- if unsupported:
- driver = UnsupportedVirtDriver()
- else:
- driver = FakeVirtDriver()
+ if host is None:
+ host = self.host
- tracker = resource_tracker.ResourceTracker(host, driver)
+ node = "fakenode"
+
+ driver = self._driver()
+
+ tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
@@ -171,10 +227,13 @@ class UnsupportedDriverTestCase(BaseTestCase):
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
- self.tracker = self._tracker(unsupported=True)
+ self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
+ def _driver(self):
+ return UnsupportedVirtDriver()
+
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
@@ -204,6 +263,23 @@ class UnsupportedDriverTestCase(BaseTestCase):
root_gb=10)
self.tracker.update_usage(self.context, instance)
+ def test_disabled_resize_claim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_instance_type_create()
+ claim = self.tracker.resize_claim(self.context, instance,
+ instance_type)
+ self.assertEqual(0, claim.memory_mb)
+ self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
+ self.assertEqual(instance_type['id'],
+ claim.migration['new_instance_type_id'])
+
+ def test_disabled_resize_context_claim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_instance_type_create()
+ with self.tracker.resize_claim(self.context, instance, instance_type) \
+ as claim:
+ self.assertEqual(0, claim.memory_mb)
+
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
@@ -221,8 +297,8 @@ class MissingComputeNodeTestCase(BaseTestCase):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
@@ -230,10 +306,10 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.created = True
return self._create_compute_node()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
- return [service]
+ return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
@@ -244,22 +320,32 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.assertFalse(self.tracker.disabled)
-class ResourceTestCase(BaseTestCase):
+class BaseTrackerTestCase(BaseTestCase):
+
def setUp(self):
- super(ResourceTestCase, self).setUp()
+ # setup plumbing for a working resource tracker with required
+ # database models and a compatible compute driver:
+ super(BaseTrackerTestCase, self).setUp()
+
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self._migrations = {}
+
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
+ self.stubs.Set(db, 'migration_update',
+ self._fake_migration_update)
+ self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
+ self._fake_migration_get_in_progress_by_host_and_node)
self.tracker.update_available_resource(self.context)
- self.limits = self._basic_limits()
+ self.limits = self._limits()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
- return [self.service]
+ return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
@@ -269,36 +355,51 @@ class ResourceTestCase(BaseTestCase):
self.compute.update(values)
return self.compute
- def _basic_limits(self):
- """Get basic limits, no oversubscription"""
+ def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
+ node):
+ status = ['confirmed', 'reverted']
+ migrations = []
+
+ for migration in self._migrations.values():
+ if migration['status'] in status:
+ continue
+
+ uuid = migration['instance_uuid']
+ migration['instance'] = self._instances[uuid]
+ migrations.append(migration)
+
+ return migrations
+
+ def _fake_migration_update(self, ctxt, migration_id, values):
+ # cheat and assume there's only 1 migration present
+ migration = self._migrations.values()[0]
+ migration.update(values)
+ return migration
+
+ def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB,
+ disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS):
+ """Create limits dictionary used for oversubscribing resources."""
+
return {
- 'memory_mb': FAKE_VIRT_MEMORY_MB * 2,
- 'disk_gb': FAKE_VIRT_LOCAL_GB,
- 'vcpu': FAKE_VIRT_VCPUS,
+ 'memory_mb': memory_mb,
+ 'disk_gb': disk_gb,
+ 'vcpu': vcpus
}
- def test_update_usage_only_for_tracked(self):
- instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
- task_state=None)
- self.tracker.update_usage(self.context, instance)
+ def _assert(self, value, field, tracker=None):
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
+ if tracker is None:
+ tracker = self.tracker
- claim = self.tracker.instance_claim(self.context, instance,
- self.limits)
- self.assertNotEqual(0, claim.memory_mb)
- self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
+ if not field in tracker.compute_node:
+ raise test.TestingException(
+ "'%(field)s' not in compute node." % locals())
+ x = tracker.compute_node[field]
- # now update should actually take effect
- instance['task_state'] = task_states.SCHEDULING
- self.tracker.update_usage(self.context, instance)
+ self.assertEqual(value, x)
- self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
- self.assertEqual(1, self.tracker.compute_node['current_workload'])
+
+class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
@@ -314,13 +415,46 @@ class ResourceTestCase(BaseTestCase):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
- def test_claim_and_audit(self):
- self.assertEqual(5, self.tracker.compute_node['memory_mb'])
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ def test_init(self):
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self._assert(0, 'running_vms')
+ self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
- self.assertEqual(6, self.tracker.compute_node['local_gb'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+class InstanceClaimTestCase(BaseTrackerTestCase):
+
+ def test_update_usage_only_for_tracked(self):
+ instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
+ task_state=None)
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'current_workload')
+
+ claim = self.tracker.instance_claim(self.context, instance,
+ self.limits)
+ self.assertNotEqual(0, claim.memory_mb)
+ self._assert(3, 'memory_mb_used')
+ self._assert(2, 'local_gb_used')
+
+ # now update should actually take effect
+ instance['task_state'] = task_states.SCHEDULING
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(3, 'memory_mb_used')
+ self._assert(2, 'local_gb_used')
+ self._assert(1, 'current_workload')
+
+ def test_claim_and_audit(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
@@ -354,12 +488,6 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
def test_claim_and_abort(self):
- self.assertEqual(5, self.tracker.compute_node['memory_mb'])
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
-
- self.assertEqual(6, self.tracker.compute_node['local_gb'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
-
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
@@ -368,21 +496,17 @@ class ResourceTestCase(BaseTestCase):
self.limits)
self.assertNotEqual(None, claim)
- self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
- self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
claim.abort()
- self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(5, self.compute["free_ram_mb"])
- self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(6, self.compute["free_disk_gb"])
@@ -450,8 +574,6 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(2, self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
- self.assertFalse(self.tracker.disabled)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
@@ -493,3 +615,259 @@ class ResourceTestCase(BaseTestCase):
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
+
+ def test_skip_deleted_instances(self):
+ # ensure that the audit process skips instances that have vm_state
+ # DELETED, but the DB record is not yet deleted.
+ self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+
+
+class ResizeClaimTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+
+ self.stubs.Set(self.conductor.db,
+ 'migration_create', self._fake_migration_create)
+
+ self.instance = self._fake_instance()
+ self.instance_type = self._fake_instance_type_create()
+
+ def _fake_migration_create(self, context, values=None):
+ instance_uuid = str(uuid.uuid1())
+ migration = {
+ 'id': 1,
+ 'source_compute': 'host1',
+ 'source_node': 'fakenode',
+ 'dest_compute': 'host2',
+ 'dest_node': 'fakenode',
+ 'dest_host': '127.0.0.1',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'instance_uuid': instance_uuid,
+ 'status': 'pre-migrating',
+ 'updated_at': timeutils.utcnow()
+ }
+ if values:
+ migration.update(values)
+
+ self._migrations[migration['instance_uuid']] = migration
+ return migration
+
+ def test_claim(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ def test_abort(self):
+ try:
+ with self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits):
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ def test_additive_claims(self):
+
+ limits = self._limits(FAKE_VIRT_MEMORY_MB * 2, FAKE_VIRT_LOCAL_GB * 2,
+ FAKE_VIRT_VCPUS * 2)
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, limits)
+ instance2 = self._fake_instance()
+ self.tracker.resize_claim(self.context, instance2, self.instance_type,
+ limits)
+
+ self._assert(2 * FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_claim_and_audit(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_same_host(self):
+ self.limits['vcpu'] = 3
+
+ src_type = self._fake_instance_type_create(id=2, memory_mb=1,
+ root_gb=1, ephemeral_gb=0, vcpus=1)
+ dest_type = self._fake_instance_type_create(id=2, memory_mb=2,
+ root_gb=2, ephemeral_gb=1, vcpus=2)
+
+ # make an instance of src_type:
+ instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0,
+ vcpus=1, instance_type_id=2)
+
+ self.tracker.instance_claim(self.context, instance, self.limits)
+
+ # resize to dest_type:
+ claim = self.tracker.resize_claim(self.context, self.instance,
+ dest_type, self.limits)
+
+ self._assert(3, 'memory_mb_used')
+ self._assert(4, 'local_gb_used')
+ self._assert(3, 'vcpus_used')
+
+ self.tracker.update_available_resource(self.context)
+ claim.abort()
+
+ # only the original instance should remain, not the migration:
+ self._assert(1, 'memory_mb_used')
+ self._assert(1, 'local_gb_used')
+ self._assert(1, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ def test_revert(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ migration, itype = self.tracker.tracked_migrations[
+ self.instance['uuid']]
+ self.tracker.revert_resize(self.context, migration)
+
+ self.assertEqual(0, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+
+ def test_revert_reserve_source(self):
+ # if a revert has started at the API and audit runs on
+ # the source compute before the instance flips back to source,
+ # resources should still be held at the source based on the
+ # migration:
+ dest = "desthost"
+ dest_tracker = self._tracker(host=dest)
+ dest_tracker.update_available_resource(self.context)
+
+ self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
+ root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
+ vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
+
+ values = {'source_compute': self.host, 'dest_compute': dest,
+ 'old_instance_type_id': 1, 'new_instance_type_id': 1,
+ 'status': 'post-migrating',
+ 'instance_uuid': self.instance['uuid']}
+ migration = self._fake_migration_create(self.context, values)
+
+ # attach an instance to the destination host tracker:
+ dest_tracker.instance_claim(self.context, self.instance)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # audit and recheck to confirm migration doesn't get double counted
+ # on dest:
+ dest_tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # apply the migration to the source host tracker:
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ # flag the instance and migration as reverting and re-audit:
+ self.instance['vm_state'] = vm_states.RESIZED
+ self.instance['task_state'] = task_states.RESIZE_REVERTING
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_resize_filter(self):
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
+ self.assertFalse(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.RESIZED,
+ task_state=task_states.SUSPENDING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_MIGRATING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ def test_dupe_filter(self):
+ self._fake_instance_type_create(id=2, memory_mb=1, root_gb=1,
+ ephemeral_gb=1, vcpus=1)
+
+ instance = self._fake_instance(host=self.host)
+
+ values = {'source_compute': self.host, 'dest_compute': self.host,
+ 'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
+ self._fake_migration_create(self.context, values)
+ self._fake_migration_create(self.context, values)
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ def test_set_instance_host_and_node(self):
+ instance = self._fake_instance()
+ self.assertEqual(None, instance['host'])
+ self.assertEqual(None, instance['launched_on'])
+ self.assertEqual(None, instance['node'])
+
+ claim = self.tracker.instance_claim(self.context, instance)
+ self.assertNotEqual(0, claim.memory_mb)
+
+ self.assertEqual('fakehost', instance['host'])
+ self.assertEqual('fakehost', instance['launched_on'])
+ self.assertEqual('fakenode', instance['node'])
+
+
+class OrphanTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(OrphanTestCase, self).setUp()
+
+ def _driver(self):
+ class OrphanVirtDriver(FakeVirtDriver):
+ def get_per_instance_usage(self):
+ return {
+ '1-2-3-4-5': {'memory_mb': 4, 'uuid': '1-2-3-4-5'},
+ '2-3-4-5-6': {'memory_mb': 4, 'uuid': '2-3-4-5-6'},
+
+ }
+
+ return OrphanVirtDriver()
+
+ def test_usage(self):
+ # 2 instances, 4 mb each
+ self.assertEqual(8, self.tracker.compute_node['memory_mb_used'])
+
+ def test_find(self):
+ # create one legit instance and verify the 2 orphans remain
+ self._fake_instance()
+ orphans = self.tracker._find_orphaned_instances()
+
+ self.assertEqual(2, len(orphans))
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 8db2534af..00b90ea65 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -21,23 +21,22 @@ Unit Tests for nova.compute.rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova import db
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class ComputeRpcAPITestCase(test.TestCase):
def setUp(self):
+ super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
inst = db.instance_create(self.context, {'host': 'fake_host',
'instance_type_id': 1})
self.fake_instance = jsonutils.to_primitive(inst)
- super(ComputeRpcAPITestCase, self).setUp()
def test_serialized_instance_has_name(self):
self.assertTrue('name' in self.fake_instance)
@@ -74,7 +73,7 @@ class ComputeRpcAPITestCase(test.TestCase):
host = kwargs['destination']
else:
host = kwargs['instance']['host']
- expected_topic = '%s.%s' % (FLAGS.compute_topic, host)
+ expected_topic = '%s.%s' % (CONF.compute_topic, host)
self.fake_args = None
self.fake_kwargs = None
@@ -174,6 +173,9 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('host_power_action', 'call', action='action',
host='host')
+ def test_get_backdoor_port(self):
+ self._test_compute_api('get_backdoor_port', 'call', host='host')
+
def test_inject_file(self):
self._test_compute_api('inject_file', 'cast',
instance=self.fake_instance, path='path', file_contents='fc')
@@ -216,7 +218,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance, block_migration='block_migration',
- disk='disk', host='host')
+ disk='disk', host='host', migrate_data=None,
+ version='2.21')
def test_prep_resize(self):
self._test_compute_api('prep_resize', 'cast',
@@ -225,23 +228,31 @@ class ComputeRpcAPITestCase(test.TestCase):
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
- version='2.10')
+ node='node',
+ version='2.20')
def test_reboot_instance(self):
self.maxDiff = None
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance,
block_device_info={},
- network_info={},
reboot_type='type',
- version='2.5')
+ version='2.23')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
instance=self.fake_instance, new_pass='pass',
injected_files='files', image_ref='ref',
- orig_image_ref='orig_ref',
- orig_sys_metadata='orig_sys_metadata', version='2.1')
+ orig_image_ref='orig_ref', bdms=[], recreate=False,
+ on_shared_storage=False, orig_sys_metadata='orig_sys_metadata',
+ version='2.22')
+
+ def test_rebuild_instance_with_shared(self):
+ self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
+ injected_files='None', image_ref='None', orig_image_ref='None',
+ bdms=[], instance=self.fake_instance, host='new_host',
+ orig_sys_metadata=None, recreate=True, on_shared_storage=True,
+ version='2.22')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
@@ -307,7 +318,8 @@ class ComputeRpcAPITestCase(test.TestCase):
instance=self.fake_instance, host='fake_host',
request_spec='fake_spec', filter_properties={},
requested_networks='networks', injected_files='files',
- admin_password='pw', is_first_time=True)
+ admin_password='pw', is_first_time=True, node='node',
+ version='2.19')
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py
index 9b063b8a0..a798670c7 100644
--- a/nova/tests/compute/test_stats.py
+++ b/nova/tests/compute/test_stats.py
@@ -15,13 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute node stats"""
+"""Tests for compute node stats."""
from nova.compute import stats
from nova.compute import task_states
from nova.compute import vm_states
-from nova import db
-from nova import exception
from nova import test
diff --git a/nova/tests/compute/test_virtapi.py b/nova/tests/compute/test_virtapi.py
new file mode 100644
index 000000000..568bf456d
--- /dev/null
+++ b/nova/tests/compute/test_virtapi.py
@@ -0,0 +1,138 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+
+from nova.compute import manager as compute_manager
+from nova import context
+from nova import db
+from nova import test
+from nova.virt import fake
+from nova.virt import virtapi
+
+
+class VirtAPIBaseTest(test.TestCase, test.APICoverage):
+
+ cover_api = virtapi.VirtAPI
+
+ def setUp(self):
+ super(VirtAPIBaseTest, self).setUp()
+ self.context = context.RequestContext('fake-user', 'fake-project')
+ self.set_up_virtapi()
+
+ def set_up_virtapi(self):
+ self.virtapi = virtapi.VirtAPI()
+
+ def assertExpected(self, method, *args, **kwargs):
+ self.assertRaises(NotImplementedError,
+ getattr(self.virtapi, method), self.context,
+ *args, **kwargs)
+
+ def test_instance_update(self):
+ self.assertExpected('instance_update', 'fake-uuid',
+ dict(host='foohost'))
+
+ def test_instance_get_by_uuid(self):
+ self.assertExpected('instance_get_by_uuid', 'fake-uuid')
+
+ def test_instance_get_all_by_host(self):
+ self.assertExpected('instance_get_all_by_host', 'fake-host')
+
+ def test_aggregate_get_by_host(self):
+ self.assertExpected('aggregate_get_by_host', 'fake-host', key=None)
+
+ def test_aggregate_metadata_add(self):
+ self.assertExpected('aggregate_metadata_add', {'id': 'fake'},
+ {'foo': 'bar'}, set_delete=False)
+
+ def test_aggregate_metadata_delete(self):
+ self.assertExpected('aggregate_metadata_delete', {'id': 'fake'},
+ 'foo')
+
+ def test_security_group_get_by_instance(self):
+ self.assertExpected('security_group_get_by_instance',
+ {'id': 'fake-id'})
+
+ def test_security_group_rule_get_by_security_group(self):
+ self.assertExpected('security_group_rule_get_by_security_group',
+ {'id': 'fake-id'})
+
+ def test_provider_fw_rule_get_all(self):
+ self.assertExpected('provider_fw_rule_get_all')
+
+ def test_agent_build_get_by_triple(self):
+ self.assertExpected('agent_build_get_by_triple',
+ 'fake-hv', 'gnu/hurd', 'fake-arch')
+
+
+class FakeVirtAPITest(VirtAPIBaseTest):
+
+ cover_api = fake.FakeVirtAPI
+
+ def set_up_virtapi(self):
+ self.virtapi = fake.FakeVirtAPI()
+
+ def assertExpected(self, method, *args, **kwargs):
+ if method == 'instance_update':
+ # NOTE(danms): instance_update actually becomes the other variant
+ # in FakeVirtAPI
+ db_method = 'instance_update_and_get_original'
+ else:
+ db_method = method
+ self.mox.StubOutWithMock(db, db_method)
+
+ if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
+ 'security_group_rule_get_by_security_group',
+ 'security_group_get_by_instance'):
+ # NOTE(danms): FakeVirtAPI will convert the first argument to
+ # argument['id'], so expect that in the actual db call
+ e_args = tuple([args[0]['id']] + list(args[1:]))
+ else:
+ e_args = args
+
+ getattr(db, db_method)(self.context, *e_args, **kwargs).AndReturn(
+ 'it worked')
+ self.mox.ReplayAll()
+ result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
+ self.assertEqual(result, 'it worked')
+
+
+class FakeCompute(object):
+ def __init__(self):
+ self.conductor_api = mox.MockAnything()
+ self.db = mox.MockAnything()
+
+ def _instance_update(self, context, instance_uuid, **kwargs):
+ # NOTE(danms): Fake this behavior from compute/manager::ComputeManager
+ return self.conductor_api.instance_update(context,
+ instance_uuid, kwargs)
+
+
+class ComputeVirtAPITest(VirtAPIBaseTest):
+
+ cover_api = compute_manager.ComputeVirtAPI
+
+ def set_up_virtapi(self):
+ self.compute = FakeCompute()
+ self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
+
+ def assertExpected(self, method, *args, **kwargs):
+ self.mox.StubOutWithMock(self.compute.conductor_api, method)
+ getattr(self.compute.conductor_api, method)(
+ self.context, *args, **kwargs).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
+ self.assertEqual(result, 'it worked')
diff --git a/nova/tests/conductor/__init__.py b/nova/tests/conductor/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/conductor/__init__.py
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
new file mode 100644
index 000000000..cc3dbfcc0
--- /dev/null
+++ b/nova/tests/conductor/test_conductor.py
@@ -0,0 +1,786 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the conductor service."""
+
+import mox
+
+from nova.compute import instance_types
+from nova.compute import vm_states
+from nova import conductor
+from nova.conductor import api as conductor_api
+from nova.conductor import manager as conductor_manager
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception as exc
+from nova.openstack.common import jsonutils
+from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
+from nova import test
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+
+class FakeContext(context.RequestContext):
+ def elevated(self):
+ """Return a consistent elevated context so we can detect it."""
+ if not hasattr(self, '_elevated'):
+ self._elevated = super(FakeContext, self).elevated()
+ return self._elevated
+
+
+class _BaseTestCase(object):
+ def setUp(self):
+ super(_BaseTestCase, self).setUp()
+ self.db = None
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = FakeContext(self.user_id, self.project_id)
+
+ def stub_out_client_exceptions(self):
+ def passthru(exceptions, func, *args, **kwargs):
+ return func(*args, **kwargs)
+
+ self.stubs.Set(rpc_common, 'catch_client_exception', passthru)
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny'):
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ type_id = instance_types.get_instance_type_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = 'x86_64'
+ inst['os_type'] = 'Linux'
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+ def _do_update(self, instance_uuid, **updates):
+ return self.conductor.instance_update(self.context, instance_uuid,
+ updates)
+
+ def test_instance_update(self):
+ instance = self._create_fake_instance()
+ new_inst = self._do_update(instance['uuid'],
+ vm_state=vm_states.STOPPED)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+ self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+
+ def test_action_event_start(self):
+ self.mox.StubOutWithMock(db, 'action_event_start')
+ db.action_event_start(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_start(self.context, {})
+
+ def test_action_event_finish(self):
+ self.mox.StubOutWithMock(db, 'action_event_finish')
+ db.action_event_finish(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_finish(self.context, {})
+
+ def test_instance_update_invalid_key(self):
+ # NOTE(danms): the real DB API call ignores invalid keys
+ if self.db is None:
+ self.assertRaises(KeyError,
+ self._do_update, 'any-uuid', foobar=1)
+
+ def test_migration_get(self):
+ migration = db.migration_create(self.context.elevated(),
+ {'instance_uuid': 'fake-uuid',
+ 'status': 'migrating'})
+ self.assertEqual(jsonutils.to_primitive(migration),
+ self.conductor.migration_get(self.context,
+ migration['id']))
+
+ def test_migration_get_unconfirmed_by_dest_compute(self):
+ self.mox.StubOutWithMock(db,
+ 'migration_get_unconfirmed_by_dest_compute')
+ db.migration_get_unconfirmed_by_dest_compute(self.context,
+ 'fake-window',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
+ 'fake-window',
+ 'fake-host')
+
+ def test_migration_create(self):
+ inst = {'uuid': 'fake-uuid',
+ 'host': 'fake-host',
+ 'node': 'fake-node'}
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': inst['uuid'],
+ 'source_compute': inst['host'],
+ 'source_node': inst['node'],
+ 'fake-key': 'fake-value'}).AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.migration_create(self.context, inst,
+ {'fake-key': 'fake-value'})
+ self.assertEqual(result, 'result')
+
+ def test_migration_update(self):
+ migration = db.migration_create(self.context.elevated(),
+ {'instance_uuid': 'fake-uuid',
+ 'status': 'migrating'})
+ migration_p = jsonutils.to_primitive(migration)
+ migration = self.conductor.migration_update(self.context, migration_p,
+ 'finished')
+ self.assertEqual(migration['status'], 'finished')
+
+ def test_instance_get_by_uuid(self):
+ orig_instance = self._create_fake_instance()
+ copy_instance = self.conductor.instance_get_by_uuid(
+ self.context, orig_instance['uuid'])
+ self.assertEqual(orig_instance['name'],
+ copy_instance['name'])
+
+ def _setup_aggregate_with_host(self):
+ aggregate_ref = db.aggregate_create(self.context.elevated(),
+ {'name': 'foo'}, metadata={'availability_zone': 'foo'})
+
+ self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
+
+ aggregate_ref = db.aggregate_get(self.context.elevated(),
+ aggregate_ref['id'])
+
+ return aggregate_ref
+
+ def test_aggregate_host_add(self):
+ aggregate_ref = self._setup_aggregate_with_host()
+
+ self.assertTrue(any([host == 'bar'
+ for host in aggregate_ref['hosts']]))
+
+ db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
+
+ def test_aggregate_host_delete(self):
+ aggregate_ref = self._setup_aggregate_with_host()
+
+ self.conductor.aggregate_host_delete(self.context, aggregate_ref,
+ 'bar')
+
+ aggregate_ref = db.aggregate_get(self.context.elevated(),
+ aggregate_ref['id'])
+
+ self.assertFalse(any([host == 'bar'
+ for host in aggregate_ref['hosts']]))
+
+ db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
+
+ def test_aggregate_get(self):
+ aggregate_ref = self._setup_aggregate_with_host()
+ aggregate = self.conductor.aggregate_get(self.context,
+ aggregate_ref['id'])
+ self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
+ db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
+
+ def test_aggregate_get_by_host(self):
+ self._setup_aggregate_with_host()
+ aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
+ self.assertEqual(aggregates[0]['availability_zone'], 'foo')
+
+ def test_aggregate_metadata_add(self):
+ aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
+ metadata = {'foo': 'bar'}
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
+ db.aggregate_metadata_add(
+ mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
+ metadata)
+ self.mox.ReplayAll()
+ result = self.conductor.aggregate_metadata_add(self.context,
+ aggregate,
+ metadata)
+ self.assertEqual(result, metadata)
+
+ def test_aggregate_metadata_delete(self):
+ aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
+ db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
+ self.mox.ReplayAll()
+ self.conductor.aggregate_metadata_delete(self.context,
+ aggregate,
+ 'fake')
+
+ def test_bw_usage_update(self):
+ self.mox.StubOutWithMock(db, 'bw_usage_update')
+ self.mox.StubOutWithMock(db, 'bw_usage_get')
+
+ update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
+ get_args = (self.context, 'uuid', 0, 'mac')
+
+ db.bw_usage_update(*update_args)
+ db.bw_usage_get(*get_args).AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.conductor.bw_usage_update(*update_args)
+ self.assertEqual(result, 'foo')
+
+ def test_get_backdoor_port(self):
+ backdoor_port = 59697
+
+ def fake_get_backdoor_port(self, context):
+ return backdoor_port
+
+ if isinstance(self.conductor, conductor_api.API):
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'get_backdoor_port', fake_get_backdoor_port)
+ port = self.conductor.get_backdoor_port(self.context, 'fake_host')
+ elif isinstance(self.conductor, conductor_api.LocalAPI):
+ try:
+ self.conductor.get_backdoor_port(self.context, 'fake_host')
+ except exc.InvalidRequest:
+ port = backdoor_port
+ else:
+ if isinstance(self.conductor, conductor_rpcapi.ConductorAPI):
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'get_backdoor_port', fake_get_backdoor_port)
+ self.conductor.backdoor_port = backdoor_port
+ port = self.conductor.get_backdoor_port(self.context)
+
+ self.assertEqual(port, backdoor_port)
+
+ def test_security_group_get_by_instance(self):
+ fake_instance = {'id': 'fake-instance'}
+ self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
+ db.security_group_get_by_instance(
+ self.context, fake_instance['id']).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.security_group_get_by_instance(self.context,
+ fake_instance)
+ self.assertEqual(result, 'it worked')
+
+ def test_security_group_rule_get_by_security_group(self):
+ fake_secgroup = {'id': 'fake-secgroup'}
+ self.mox.StubOutWithMock(db,
+ 'security_group_rule_get_by_security_group')
+ db.security_group_rule_get_by_security_group(
+ self.context, fake_secgroup['id']).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.security_group_rule_get_by_security_group(
+ self.context, fake_secgroup)
+ self.assertEqual(result, 'it worked')
+
+ def test_provider_fw_rule_get_all(self):
+ fake_rules = ['a', 'b', 'c']
+ self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
+ db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
+ self.mox.ReplayAll()
+ result = self.conductor.provider_fw_rule_get_all(self.context)
+ self.assertEqual(result, fake_rules)
+
+ def test_agent_build_get_by_triple(self):
+ self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
+ db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
+ 'fake-arch').AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.agent_build_get_by_triple(self.context,
+ 'fake-hv',
+ 'fake-os',
+ 'fake-arch')
+ self.assertEqual(result, 'it worked')
+
+ def test_block_device_mapping_get_all_by_instance(self):
+ fake_inst = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db,
+ 'block_device_mapping_get_all_by_instance')
+ db.block_device_mapping_get_all_by_instance(
+ self.context, fake_inst['uuid']).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.block_device_mapping_get_all_by_instance(
+ self.context, fake_inst)
+ self.assertEqual(result, 'fake-result')
+
+ def test_instance_get_all_hung_in_rebooting(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
+ db.instance_get_all_hung_in_rebooting(self.context, 123)
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)
+
+ def test_instance_get_active_by_window(self):
+ self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
+ db.instance_get_active_by_window_joined(self.context, 'fake-begin',
+ 'fake-end', 'fake-proj',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_active_by_window(self.context,
+ 'fake-begin', 'fake-end',
+ 'fake-proj', 'fake-host')
+
+ def test_instance_destroy(self):
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ db.instance_destroy(self.context, 'fake-uuid')
+ self.mox.ReplayAll()
+ self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})
+
+ def test_instance_info_cache_delete(self):
+ self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
+ db.instance_info_cache_delete(self.context, 'fake-uuid')
+ self.mox.ReplayAll()
+ self.conductor.instance_info_cache_delete(self.context,
+ {'uuid': 'fake-uuid'})
+
+ def test_instance_info_cache_update(self):
+ fake_values = {'key1': 'val1', 'key2': 'val2'}
+ fake_instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_info_cache_update(self.context, 'fake-uuid',
+ fake_values)
+ self.mox.ReplayAll()
+ self.conductor.instance_info_cache_update(self.context,
+ fake_instance,
+ fake_values)
+
+ def test_instance_type_get(self):
+ self.mox.StubOutWithMock(db, 'instance_type_get')
+ db.instance_type_get(self.context, 'fake-id').AndReturn('fake-type')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_type_get(self.context, 'fake-id')
+ self.assertEqual(result, 'fake-type')
+
+ def test_vol_get_usage_by_time(self):
+ self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
+ db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
+ 'fake-usage')
+ self.mox.ReplayAll()
+ result = self.conductor.vol_get_usage_by_time(self.context,
+ 'fake-time')
+ self.assertEqual(result, 'fake-usage')
+
+ def test_vol_usage_update(self):
+ self.mox.StubOutWithMock(db, 'vol_usage_update')
+ db.vol_usage_update(self.context, 'fake-vol', 'rd-req', 'rd-bytes',
+ 'wr-req', 'wr-bytes', 'fake-id', 'fake-refr',
+ 'fake-bool')
+ self.mox.ReplayAll()
+ self.conductor.vol_usage_update(self.context, 'fake-vol', 'rd-req',
+ 'rd-bytes', 'wr-req', 'wr-bytes',
+ {'uuid': 'fake-id'}, 'fake-refr',
+ 'fake-bool')
+
+ def test_ping(self):
+ result = self.conductor.ping(self.context, 'foo')
+ self.assertEqual(result, {'service': 'conductor', 'arg': 'foo'})
+
+
+class ConductorTestCase(_BaseTestCase, test.TestCase):
+ """Conductor Manager Tests."""
+ def setUp(self):
+ super(ConductorTestCase, self).setUp()
+ self.conductor = conductor_manager.ConductorManager()
+ self.stub_out_client_exceptions()
+
+ def test_block_device_mapping_update_or_create(self):
+ fake_bdm = {'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ db.block_device_mapping_create(self.context, fake_bdm)
+ db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
+ db.block_device_mapping_update_or_create(self.context, fake_bdm)
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=True)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=False)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm)
+
+ def test_block_device_mapping_destroy(self):
+ fake_bdm = {'id': 'fake-bdm'}
+ fake_bdm2 = {'id': 'fake-bdm-2'}
+ fake_inst = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_device')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_volume')
+ db.block_device_mapping_destroy(self.context, 'fake-bdm')
+ db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
+ db.block_device_mapping_destroy_by_instance_and_device(self.context,
+ 'fake-uuid',
+ 'fake-device')
+ db.block_device_mapping_destroy_by_instance_and_volume(self.context,
+ 'fake-uuid',
+ 'fake-volume')
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_destroy(self.context,
+ [fake_bdm,
+ fake_bdm2])
+ self.conductor.block_device_mapping_destroy(self.context,
+ instance=fake_inst,
+ device_name='fake-device')
+ self.conductor.block_device_mapping_destroy(self.context,
+ instance=fake_inst,
+ volume_id='fake-volume')
+
+ def test_instance_get_all_by_filters(self):
+ filters = {'foo': 'bar'}
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort')
+
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_get_all_by(self.context, **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (), {})
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host'))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic'))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host'))
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary'))
+
+
+class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
+ """Conductor RPC API Tests."""
+ def setUp(self):
+ super(ConductorRPCAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_rpcapi.ConductorAPI()
+
+ def test_block_device_mapping_update_or_create(self):
+ fake_bdm = {'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ db.block_device_mapping_create(self.context, fake_bdm)
+ db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
+ db.block_device_mapping_update_or_create(self.context, fake_bdm)
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=True)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=False)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm)
+
+ def test_block_device_mapping_destroy(self):
+ fake_bdm = {'id': 'fake-bdm'}
+ fake_inst = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_device')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_volume')
+ db.block_device_mapping_destroy(self.context, 'fake-bdm')
+ db.block_device_mapping_destroy_by_instance_and_device(self.context,
+ 'fake-uuid',
+ 'fake-device')
+ db.block_device_mapping_destroy_by_instance_and_volume(self.context,
+ 'fake-uuid',
+ 'fake-volume')
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_destroy(self.context,
+ bdms=[fake_bdm])
+ self.conductor.block_device_mapping_destroy(self.context,
+ instance=fake_inst,
+ device_name='fake-device')
+ self.conductor.block_device_mapping_destroy(self.context,
+ instance=fake_inst,
+ volume_id='fake-volume')
+
+ def test_instance_get_all_by_filters(self):
+ filters = {'foo': 'bar'}
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort')
+
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_get_all_by(self.context, **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (), {})
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host'))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic'))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host'))
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
+
+
+class ConductorAPITestCase(_BaseTestCase, test.TestCase):
+ """Conductor API Tests."""
+ def setUp(self):
+ super(ConductorAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.API()
+ self.db = None
+
+ def _do_update(self, instance_uuid, **updates):
+ # NOTE(danms): the public API takes actual keyword arguments,
+ # so override the base class here to make the call correctly
+ return self.conductor.instance_update(self.context, instance_uuid,
+ **updates)
+
+ def test_bw_usage_get(self):
+ self.mox.StubOutWithMock(db, 'bw_usage_update')
+ self.mox.StubOutWithMock(db, 'bw_usage_get')
+
+ get_args = (self.context, 'uuid', 0, 'mac')
+
+ db.bw_usage_get(*get_args).AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.conductor.bw_usage_get(*get_args)
+ self.assertEqual(result, 'foo')
+
+ def test_block_device_mapping_update_or_create(self):
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ db.block_device_mapping_create(self.context, 'fake-bdm')
+ db.block_device_mapping_update(self.context,
+ 'fake-id', {'id': 'fake-id'})
+ db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
+
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
+ self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ 'fake-bdm')
+
+ def test_block_device_mapping_destroy(self):
+ fake_bdm = {'id': 'fake-bdm'}
+ fake_inst = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_device')
+ self.mox.StubOutWithMock(
+ db, 'block_device_mapping_destroy_by_instance_and_volume')
+ db.block_device_mapping_destroy(self.context, 'fake-bdm')
+ db.block_device_mapping_destroy_by_instance_and_device(self.context,
+ 'fake-uuid',
+ 'fake-device')
+ db.block_device_mapping_destroy_by_instance_and_volume(self.context,
+ 'fake-uuid',
+ 'fake-volume')
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
+ self.conductor.block_device_mapping_destroy_by_instance_and_device(
+ self.context, fake_inst, 'fake-device')
+ self.conductor.block_device_mapping_destroy_by_instance_and_volume(
+ self.context, fake_inst, 'fake-volume')
+
+ def test_instance_get_all(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all(self.context)
+ db.instance_get_all_by_host(self.context.elevated(), 'fake-host')
+ db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
+ 'updated_at', 'asc')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all(self.context)
+ self.conductor.instance_get_all_by_host(self.context, 'fake-host')
+ self.conductor.instance_get_all_by_filters(self.context,
+ {'name': 'fake-inst'},
+ 'updated_at', 'asc')
+
+ def _test_stubbed(self, name, *args, **kwargs):
+ self.mox.StubOutWithMock(db, name)
+ getattr(db, name)(self.context, *args).AndReturn('fake-result')
+ if name == 'service_destroy':
+ # TODO(russellb) This is a hack ... SetUp() starts the conductor()
+ # service. There is a cleanup step that runs after this test which
+ # also deletes the associated service record. This involves a call
+ # to db.service_destroy(), which we have stubbed out.
+ db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ result = getattr(self.conductor, name)(self.context, *args)
+ self.assertEqual(
+ result, 'fake-result' if kwargs.get('returns', True) else None)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all')
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic', 'topic')
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host', 'host')
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
+
+ def test_service_create(self):
+ self._test_stubbed('service_create', {})
+
+ def test_service_destroy(self):
+ self._test_stubbed('service_destroy', '', returns=False)
+
+ def test_ping(self):
+ timeouts = []
+ calls = dict(count=0)
+
+ def fake_ping(_self, context, message, timeout):
+ timeouts.append(timeout)
+ calls['count'] += 1
+ if calls['count'] < 15:
+ raise rpc_common.Timeout("fake")
+
+ self.stubs.Set(conductor_api.API, 'ping', fake_ping)
+
+ self.conductor.wait_until_ready(self.context)
+
+ self.assertEqual(timeouts.count(10), 10)
+ self.assertTrue(None in timeouts)
+
+
+class ConductorLocalAPITestCase(ConductorAPITestCase):
+ """Conductor LocalAPI Tests."""
+ def setUp(self):
+ super(ConductorLocalAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalAPI()
+ self.db = db
+ self.stub_out_client_exceptions()
+
+ def test_client_exceptions(self):
+ instance = self._create_fake_instance()
+ # NOTE(danms): The LocalAPI should not raise exceptions wrapped
+ # in ClientException. KeyError should be raised if an invalid
+ # update key is passed, so use that to validate.
+ self.assertRaises(KeyError,
+ self._do_update, instance['uuid'], foo='bar')
+
+ def test_ping(self):
+ # Override test in ConductorAPITestCase
+ pass
+
+
+class ConductorImportTest(test.TestCase):
+ def test_import_conductor_local(self):
+ self.flags(use_local=True, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.LocalAPI))
+
+ def test_import_conductor_rpc(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.API))
+
+ def test_import_conductor_override_to_local(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(use_local=True),
+ conductor_api.LocalAPI))
+
+
+class ConductorPolicyTest(test.TestCase):
+ def test_all_allowed_keys(self):
+
+ def fake_db_instance_update(self, *args, **kwargs):
+ return None, None
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ fake_db_instance_update)
+
+ ctxt = context.RequestContext('fake-user', 'fake-project')
+ conductor = conductor_api.LocalAPI()
+ updates = {}
+ for key in conductor_manager.allowed_updates:
+ if key in conductor_manager.datetime_fields:
+ updates[key] = timeutils.utcnow()
+ else:
+ updates[key] = 'foo'
+ conductor.instance_update(ctxt, 'fake-instance', **updates)
+
+ def test_allowed_keys_are_real(self):
+ instance = models.Instance()
+ keys = list(conductor_manager.allowed_updates)
+
+ # NOTE(danms): expected_task_state is a parameter that gets
+ # passed to the db layer, but is not actually an instance attribute
+ del keys[keys.index('expected_task_state')]
+
+ for key in keys:
+ self.assertTrue(hasattr(instance, key))
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
new file mode 100644
index 000000000..9155a3f68
--- /dev/null
+++ b/nova/tests/conf_fixture.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from nova import config
+from nova import ipv6
+from nova.openstack.common import cfg
+from nova import paths
+from nova.tests.utils import cleanup_dns_managers
+
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
+CONF.import_opt('fake_network', 'nova.network.manager')
+CONF.import_opt('network_size', 'nova.network.manager')
+CONF.import_opt('num_networks', 'nova.network.manager')
+CONF.import_opt('floating_ip_dns_manager', 'nova.network.manager')
+CONF.import_opt('instance_dns_manager', 'nova.network.manager')
+CONF.import_opt('policy_file', 'nova.policy')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('api_paste_config', 'nova.wsgi')
+
+
+class ConfFixture(fixtures.Fixture):
+ """Fixture to manage global conf settings."""
+
+ def __init__(self, conf):
+ self.conf = conf
+
+ def setUp(self):
+ super(ConfFixture, self).setUp()
+
+ self.conf.set_default('api_paste_config',
+ paths.state_path_def('etc/nova/api-paste.ini'))
+ self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
+ self.conf.set_default('fake_network', True)
+ self.conf.set_default('fake_rabbit', True)
+ self.conf.set_default('flat_network_bridge', 'br100')
+ self.conf.set_default('floating_ip_dns_manager',
+ 'nova.tests.utils.dns_manager')
+ self.conf.set_default('instance_dns_manager',
+ 'nova.tests.utils.dns_manager')
+ self.conf.set_default('lock_path', None)
+ self.conf.set_default('network_size', 8)
+ self.conf.set_default('num_networks', 2)
+ self.conf.set_default('rpc_backend',
+ 'nova.openstack.common.rpc.impl_fake')
+ self.conf.set_default('rpc_cast_timeout', 5)
+ self.conf.set_default('rpc_response_timeout', 5)
+ self.conf.set_default('sql_connection', "sqlite://")
+ self.conf.set_default('sqlite_synchronous', False)
+ self.conf.set_default('use_ipv6', True)
+ self.conf.set_default('verbose', True)
+ self.conf.set_default('vlan_interface', 'eth0')
+ config.parse_args([], default_config_files=[])
+ self.addCleanup(self.conf.reset)
+ self.addCleanup(cleanup_dns_managers)
+ self.addCleanup(ipv6.api.reset_backend)
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 2b50edf05..8c2e603aa 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -18,36 +18,35 @@
"""Tests For Console proxy."""
-from nova import config
from nova.console import api as console_api
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import rpc
from nova import test
-FLAGS = flags.FLAGS
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('console_manager', 'nova.service')
CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
- """Test case for console proxy manager"""
+ """Test case for console proxy manager."""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
- self.console = importutils.import_object(FLAGS.console_manager)
+ self.console = importutils.import_object(CONF.console_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
def _create_instance(self):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
@@ -100,7 +99,7 @@ class ConsoleTestCase(test.TestCase):
instance['host'], self.console.host,
self.console.driver.console_type)
- console_instances = [con['instance_uuid'] for con in pool.consoles]
+ console_instances = [con['instance_uuid'] for con in pool['consoles']]
self.assert_(instance['uuid'] in console_instances)
db.instance_destroy(self.context, instance['uuid'])
@@ -124,7 +123,7 @@ class ConsoleTestCase(test.TestCase):
class ConsoleAPITestCase(test.TestCase):
- """Test case for console API"""
+ """Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
@@ -186,3 +185,13 @@ class ConsoleAPITestCase(test.TestCase):
self.mox.ReplayAll()
self.console_api.create_console(self.context, self.fake_uuid)
+
+ def test_get_backdoor_port(self):
+ self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI,
+ 'get_backdoor_port')
+
+ console_rpcapi.ConsoleAPI.get_backdoor_port(self.context, 'fake_host')
+
+ self.mox.ReplayAll()
+
+ self.console_api.get_backdoor_port(self.context, 'fake_host')
diff --git a/nova/tests/console/test_rpcapi.py b/nova/tests/console/test_rpcapi.py
index 9499002c6..8d9d0514f 100644
--- a/nova/tests/console/test_rpcapi.py
+++ b/nova/tests/console/test_rpcapi.py
@@ -20,40 +20,51 @@ Unit Tests for nova.console.rpcapi
from nova.console import rpcapi as console_rpcapi
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class ConsoleRpcAPITestCase(test.TestCase):
- def _test_console_api(self, method, **kwargs):
+ def _test_console_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = console_rpcapi.ConsoleAPI()
+ expected_retval = 'foo' if method == 'call' else None
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
+
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
- self.cast_ctxt = None
- self.cast_topic = None
- self.cast_msg = None
+ self.fake_args = None
+ self.fake_kwargs = None
- def _fake_cast(_ctxt, _topic, _msg):
- self.cast_ctxt = _ctxt
- self.cast_topic = _topic
- self.cast_msg = _msg
+ def _fake_rpc_method(*args, **kwargs):
+ self.fake_args = args
+ self.fake_kwargs = kwargs
+ if expected_retval:
+ return expected_retval
- self.stubs.Set(rpc, 'cast', _fake_cast)
+ self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
- getattr(rpcapi, method)(ctxt, **kwargs)
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
- self.assertEqual(self.cast_ctxt, ctxt)
- self.assertEqual(self.cast_topic, FLAGS.console_topic)
- self.assertEqual(self.cast_msg, expected_msg)
+ self.assertEqual(retval, expected_retval)
+ expected_args = [ctxt, CONF.console_topic, expected_msg]
+ for arg, expected_arg in zip(self.fake_args, expected_args):
+ self.assertEqual(arg, expected_arg)
def test_add_console(self):
- self._test_console_api('add_console', instance_id='i')
+ self._test_console_api('add_console', instance_id='i',
+ rpc_method='cast')
def test_remove_console(self):
- self._test_console_api('remove_console', console_id='i')
+ self._test_console_api('remove_console', console_id='i',
+ rpc_method='cast')
+
+ def test_get_backdoor_port(self):
+ self._test_console_api('get_backdoor_port', host='fake_host',
+ rpc_method='call', version='1.1')
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index da50eb83b..15397a400 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -20,16 +20,12 @@ Tests for Consoleauth Code.
"""
-import time
-
from nova.consoleauth import manager
from nova import context
-from nova import flags
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import test
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@@ -42,11 +38,17 @@ class ConsoleauthTestCase(test.TestCase):
self.context = context.get_admin_context()
def test_tokens_expire(self):
- """Test that tokens expire correctly."""
+ # Test that tokens expire correctly.
+ self.useFixture(test.TimeOverride())
token = 'mytok'
self.flags(console_token_ttl=1)
self.manager.authorize_console(self.context, token, 'novnc',
'127.0.0.1', 'host', '')
self.assertTrue(self.manager.check_token(self.context, token))
- time.sleep(1.1)
+ timeutils.advance_time_seconds(1)
self.assertFalse(self.manager.check_token(self.context, token))
+
+ def test_get_backdoor_port(self):
+ self.manager.backdoor_port = 59697
+ port = self.manager.get_backdoor_port(self.context)
+ self.assertEqual(port, self.manager.backdoor_port)
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index c59e322b8..264c4e10b 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -20,12 +20,11 @@ Unit Tests for nova.consoleauth.rpcapi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class ConsoleAuthRpcAPITestCase(test.TestCase):
@@ -33,8 +32,12 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
expected_retval = 'foo'
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
+
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
self.call_ctxt = None
self.call_topic = None
@@ -54,7 +57,7 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
- self.assertEqual(self.call_topic, FLAGS.consoleauth_topic)
+ self.assertEqual(self.call_topic, CONF.consoleauth_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
@@ -65,3 +68,7 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
def test_check_token(self):
self._test_consoleauth_api('check_token', token='t')
+
+ def test_get_backdoor_port(self):
+ self._test_consoleauth_api('get_backdoor_port', host='fake_host',
+ version='1.1')
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index a78fd2e12..b14f248e6 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
from nova import db
from nova import exception
@@ -68,7 +68,6 @@ def stub_out_db_network_api(stubs):
fixed_ip_fields = {'id': 0,
'network_id': 0,
- 'network': FakeModel(network_fields),
'address': '192.168.0.100',
'instance': False,
'instance_id': 0,
@@ -208,15 +207,6 @@ def stub_out_db_network_api(stubs):
if ips:
return FakeModel(ips[0])
- def fake_fixed_ip_get_network(context, address):
- ips = filter(lambda i: i['address'] == address,
- fixed_ips)
- if ips:
- nets = filter(lambda n: n['id'] == ips[0]['network_id'],
- networks)
- if nets:
- return FakeModel(nets[0])
-
def fake_fixed_ip_update(context, address, values):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
@@ -318,7 +308,6 @@ def stub_out_db_network_api(stubs):
fake_fixed_ip_disassociate_all_by_timeout,
fake_fixed_ip_get_by_instance,
fake_fixed_ip_get_by_address,
- fake_fixed_ip_get_network,
fake_fixed_ip_update,
fake_instance_type_get,
fake_virtual_interface_create,
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
deleted file mode 100644
index f8661e434..000000000
--- a/nova/tests/fake_flags.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import config
-from nova import flags
-
-CONF = config.CONF
-
-CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
-CONF.import_opt('fake_network', 'nova.network.manager')
-CONF.import_opt('iscsi_num_targets', 'nova.volume.driver')
-CONF.import_opt('network_size', 'nova.network.manager')
-CONF.import_opt('num_networks', 'nova.network.manager')
-CONF.import_opt('policy_file', 'nova.policy')
-
-
-def set_defaults(conf):
- conf.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini')
- conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
- conf.set_default('fake_network', True)
- conf.set_default('fake_rabbit', True)
- conf.set_default('flat_network_bridge', 'br100')
- conf.set_default('iscsi_num_targets', 8)
- conf.set_default('network_size', 8)
- conf.set_default('num_networks', 2)
- conf.set_default('vlan_interface', 'eth0')
- conf.set_default('rpc_backend', 'nova.openstack.common.rpc.impl_fake')
- conf.set_default('sql_connection', "sqlite://")
- conf.set_default('sqlite_synchronous', False)
- conf.set_default('use_ipv6', True)
- conf.set_default('verbose', True)
- conf.set_default('rpc_response_timeout', 5)
- conf.set_default('rpc_cast_timeout', 5)
- conf.set_default('lock_path', None)
diff --git a/nova/tests/fake_hosts.py b/nova/tests/fake_hosts.py
new file mode 100644
index 000000000..e6831d124
--- /dev/null
+++ b/nova/tests/fake_hosts.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides some fake hosts to test host and service related functions
+"""
+
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+OS_API_HOST_LIST = {"hosts": HOST_LIST}
+
+HOST_LIST_NOVA_ZONE = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+SERVICES_LIST = [
+ {"host": "host_c1", "topic": "compute"},
+ {"host": "host_c2", "topic": "compute"}]
diff --git a/nova/tests/fake_imagebackend.py b/nova/tests/fake_imagebackend.py
index 978c879fd..c284a5042 100644
--- a/nova/tests/fake_imagebackend.py
+++ b/nova/tests/fake_imagebackend.py
@@ -28,7 +28,7 @@ class Backend(object):
def image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
- self.path = os.path.join(instance, name)
+ self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index 020ff8192..b3d842468 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -17,8 +17,14 @@
import os
import StringIO
+from nova.openstack.common import cfg
-files = {}
+
+CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
+
+files = {'console.log': True}
disk_sizes = {}
disk_backing_files = {}
disk_type = "qcow2"
@@ -86,7 +92,10 @@ def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
class File(object):
def __init__(self, path, mode=None):
- self.fp = StringIO.StringIO(files[path])
+ if path in files:
+ self.fp = StringIO.StringIO(files[path])
+ else:
+ self.fp = StringIO.StringIO(files[os.path.split(path)[-1]])
def __enter__(self):
return self.fp
@@ -94,6 +103,9 @@ class File(object):
def __exit__(self, *args):
return
+ def close(self, *args, **kwargs):
+ self.fp.close()
+
def file_open(path, mode=None):
return File(path, mode)
@@ -127,3 +139,8 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
pass
+
+
+def get_instance_path(instance):
+ # TODO(mikal): we should really just call the real one here
+ return os.path.join(CONF.instances_path, instance['name'])
diff --git a/nova/tests/fake_loadables/__init__.py b/nova/tests/fake_loadables/__init__.py
new file mode 100644
index 000000000..824243347
--- /dev/null
+++ b/nova/tests/fake_loadables/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Loadable class handling.
+"""
+
+from nova import loadables
+
+
+class FakeLoadable(object):
+ pass
+
+
+class FakeLoader(loadables.BaseLoader):
+ def __init__(self):
+ super(FakeLoader, self).__init__(FakeLoadable)
diff --git a/nova/tests/fake_loadables/fake_loadable1.py b/nova/tests/fake_loadables/fake_loadable1.py
new file mode 100644
index 000000000..58f9704b3
--- /dev/null
+++ b/nova/tests/fake_loadables/fake_loadable1.py
@@ -0,0 +1,44 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #1
+"""
+
+from nova.tests import fake_loadables
+
+
+class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass4(object):
+ """Not a correct subclass."""
+
+
+def return_valid_classes():
+ return [FakeLoadableSubClass1, FakeLoadableSubClass2]
+
+
+def return_invalid_classes():
+ return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
+ FakeLoadableSubClass4]
diff --git a/nova/tests/fake_loadables/fake_loadable2.py b/nova/tests/fake_loadables/fake_loadable2.py
new file mode 100644
index 000000000..3e365effc
--- /dev/null
+++ b/nova/tests/fake_loadables/fake_loadable2.py
@@ -0,0 +1,39 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #2
+"""
+
+from nova.tests import fake_loadables
+
+
+class FakeLoadableSubClass5(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass6(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass7(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass8(BaseException):
+ """Not a correct subclass."""
+
+
+def return_valid_class():
+ return [FakeLoadableSubClass6]
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index ff22278c5..b97999e7d 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -20,17 +20,18 @@ from nova.compute import manager as compute_manager
import nova.context
from nova import db
from nova import exception
-from nova import flags
from nova.network import api as network_api
from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import nova_ipam_lib
-from nova import utils
+from nova.network import rpcapi as network_rpcapi
+from nova.openstack.common import cfg
from nova.virt.libvirt import config as libvirt_config
HOST = "testhost"
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
class FakeIptablesFirewallDriver(object):
@@ -52,7 +53,7 @@ class FakeVIFDriver(object):
def setattr(self, key, val):
self.__setattr__(key, val)
- def plug(self, instance, vif):
+ def get_config(self, instance, network, mapping):
conf = libvirt_config.LibvirtConfigGuestInterface()
for attr, val in conf.__dict__.iteritems():
@@ -61,9 +62,12 @@ class FakeVIFDriver(object):
return conf
+ def plug(self, instance, vif):
+ pass
+
class FakeModel(dict):
- """Represent a model from the db"""
+ """Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
@@ -112,7 +116,7 @@ class FakeNetworkManager(network_manager.NetworkManager):
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
- raise exception.NetworkNotFoundForCidr()
+ raise exception.NetworkNotFoundForCidr(cidr=cidr)
def network_create_safe(self, context, net):
fakenet = dict(net)
@@ -123,7 +127,7 @@ class FakeNetworkManager(network_manager.NetworkManager):
return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
def network_get_by_uuid(self, context, network_uuid):
- raise exception.NetworkNotFoundForUUID()
+ raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
def network_get_all(self, context):
raise exception.NoNetworksFound()
@@ -145,6 +149,7 @@ class FakeNetworkManager(network_manager.NetworkManager):
self.db = self.FakeDB()
self.deallocate_called = None
self.deallocate_fixed_ip_calls = []
+ self.network_rpcapi = network_rpcapi.NetworkAPI()
# TODO(matelakat) method signature should align with the faked one's
def deallocate_fixed_ip(self, context, address=None, host=None):
@@ -158,7 +163,7 @@ class FakeNetworkManager(network_manager.NetworkManager):
def fake_network(network_id, ipv6=None):
if ipv6 is None:
- ipv6 = FLAGS.use_ipv6
+ ipv6 = CONF.use_ipv6
fake_network = {'id': network_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
'label': 'test%d' % network_id,
@@ -185,7 +190,7 @@ def fake_network(network_id, ipv6=None):
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
fake_network['injected'] = True
return fake_network
@@ -433,7 +438,7 @@ def _get_fake_cache():
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
diff --git a/nova/tests/policy.json b/nova/tests/fake_policy.py
index efe2724ad..15890cdcd 100644
--- a/nova/tests/policy.json
+++ b/nova/tests/fake_policy.py
@@ -1,3 +1,21 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+policy_data = """
{
"admin_api": "role:admin",
@@ -8,6 +26,7 @@
"compute:get": "",
"compute:get_all": "",
+ "compute:get_all_tenants": "",
"compute:update": "",
@@ -84,16 +103,21 @@
"compute_extension:admin_actions:resetState": "",
"compute_extension:admin_actions:migrate": "",
"compute_extension:aggregates": "",
+ "compute_extension:agents": "",
+ "compute_extension:cells": "",
"compute_extension:certificates": "",
"compute_extension:cloudpipe": "",
+ "compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
"compute_extension:console_output": "",
"compute_extension:consoles": "",
+ "compute_extension:coverage_ext": "is_admin:True",
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
"compute_extension:extended_server_attributes": "",
"compute_extension:extended_status": "",
+ "compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
@@ -104,6 +128,10 @@
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
+ "compute_extension:floating_ips_bulk": "",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "is_admin:True",
+ "compute_extension:hide_server_addresses": "",
"compute_extension:hosts": "",
"compute_extension:hypervisors": "",
"compute_extension:instance_usage_audit_log": "",
@@ -111,12 +139,16 @@
"compute_extension:multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
+ "compute_extension:networks_associate": "",
+ "compute_extension:os-tenant-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "",
+ "compute_extension:server_password": "",
+ "compute_extension:services": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"compute_extension:users": "",
@@ -161,6 +193,7 @@
"network:get_all_networks": "",
"network:get_network": "",
+ "network:create_networks": "",
"network:delete_network": "",
"network:disassociate_network": "",
"network:get_vifs_by_instance": "",
@@ -198,3 +231,4 @@
"network:create_public_dns_domain": "",
"network:delete_dns_domain": ""
}
+"""
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 54fd85fe5..f2aa3ea91 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -12,12 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake volume API"""
+"""Implementation of a fake volume API."""
+
+import uuid
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-from nova import utils
+
LOG = logging.getLogger(__name__)
@@ -34,7 +36,7 @@ class fake_volume():
if snapshot is not None:
snapshot_id = snapshot['id']
if volume_id is None:
- volume_id = str(utils.gen_uuid())
+ volume_id = str(uuid.uuid4())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
@@ -79,7 +81,7 @@ class fake_snapshot():
def __init__(self, volume_id, size, name, desc, id=None):
if id is None:
- id = str(utils.gen_uuid())
+ id = str(uuid.uuid4())
self.snap = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py
new file mode 100644
index 000000000..33ca49c33
--- /dev/null
+++ b/nova/tests/fakeguestfs.py
@@ -0,0 +1,140 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class GuestFS(object):
+
+ def __init__(self):
+ self.drives = []
+ self.running = False
+ self.closed = False
+ self.mounts = []
+ self.files = {}
+ self.auginit = False
+
+ def launch(self):
+ self.running = True
+
+ def shutdown(self):
+ self.running = False
+ self.mounts = []
+ self.drives = []
+
+ def close(self):
+ self.closed = True
+
+ def add_drive_opts(self, file, *args, **kwargs):
+ self.drives.append((file, kwargs['format']))
+
+ def inspect_os(self):
+ return ["/dev/guestvgf/lv_root"]
+
+ def inspect_get_mountpoints(self, dev):
+ return [["/", "/dev/mapper/guestvgf-lv_root"],
+ ["/boot", "/dev/vda1"]]
+
+ def mount_options(self, options, device, mntpoint):
+ self.mounts.append((options, device, mntpoint))
+
+ def mkdir_p(self, path):
+ if not path in self.files:
+ self.files[path] = {
+ "isdir": True,
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700
+ }
+
+ def read_file(self, path):
+ if not path in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700
+ }
+
+ return self.files[path]["content"]
+
+ def write(self, path, content):
+ if not path in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700
+ }
+
+ self.files[path]["content"] = content
+
+ def write_append(self, path, content):
+ if not path in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700
+ }
+
+ self.files[path]["content"] = self.files[path]["content"] + content
+
+ def stat(self, path):
+ if not path in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ return self.files[path]["mode"]
+
+ def chown(self, uid, gid, path):
+ if not path in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ if uid != -1:
+ self.files[path]["uid"] = uid
+ if gid != -1:
+ self.files[path]["gid"] = gid
+
+ def chmod(self, mode, path):
+ if not path in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ self.files[path]["mode"] = mode
+
+ def aug_init(self, root, flags):
+ self.auginit = True
+
+ def aug_close(self):
+ self.auginit = False
+
+ def aug_get(self, cfgpath):
+ if not self.auginit:
+ raise RuntimeError("Augeus not initialized")
+
+ if cfgpath == "/files/etc/passwd/root/uid":
+ return 0
+ elif cfgpath == "/files/etc/passwd/fred/uid":
+ return 105
+ elif cfgpath == "/files/etc/passwd/joe/uid":
+ return 110
+ elif cfgpath == "/files/etc/group/root/gid":
+ return 0
+ elif cfgpath == "/files/etc/group/users/gid":
+ return 500
+ elif cfgpath == "/files/etc/group/admins/gid":
+ return 600
+ raise RuntimeError("Unknown path %s", cfgpath)
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 7c9d5b238..8d9561c7e 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -839,6 +839,9 @@ class Connection(object):
nwfilter = NWFilter(self, xml)
self._add_filter(nwfilter)
+ def listDefinedDomains(self):
+ return []
+
def openReadOnly(uri):
return Connection(uri, readonly=True)
diff --git a/nova/tests/hyperv/README.rst b/nova/tests/hyperv/README.rst
new file mode 100644
index 000000000..c7ba16046
--- /dev/null
+++ b/nova/tests/hyperv/README.rst
@@ -0,0 +1,83 @@
+===========================================
+OpenStack Hyper-V Nova Testing Architecture
+===========================================
+
+The Hyper-V Nova Compute plugin uses Windows Management Instrumentation (WMI)
+as the main API for hypervisor related operations.
+WMI has a database / procedural oriented nature that can become difficult to
+test with a traditional static mock / stub based unit testing approach.
+
+The included Hyper-V testing framework has been developed with the
+following goals:
+
+1) Dynamic mock generation.
+2) Decoupling. No dependencies on WMI or any other module.
+   The tests are designed to work with mocked objects in all cases, including
+   OS-dependent (e.g. wmi, os, subprocess) and non-deterministic
+   (e.g. time, uuid) modules.
+3) Transparency. Mocks and real objects can be swapped via DI
+ or monkey patching.
+4) Platform independence.
+5) Tests need to be executed against the real object or against the mocks
+ with a simple configuration switch. Development efforts can highly
+ benefit from this feature.
+6) It must be possible to change a mock's behavior without running the tests
+ against the hypervisor (e.g. by manually adding a value / return value).
+
+The tests included in this package include dynamically generated mock objects,
+based on the recording of the attribute values and invocations on the
+real WMI objects and other OS dependent features.
+The generated mock objects are serialized in the nova/tests/hyperv/stubs
+directory as gzipped pickled objects.
+
+An environment variable controls the execution mode of the tests.
+
+Recording mode:
+
+NOVA_GENERATE_TEST_MOCKS=True
+Tests are executed on the hypervisor (without mocks), and mock objects are
+generated.
+
+Replay mode:
+
+NOVA_GENERATE_TEST_MOCKS=
+Tests are executed with the existing mock objects (default).
+
+Mock generation is performed by nova.tests.hyperv.mockproxy.MockProxy.
+Instances of this class wrap objects that need to be mocked and act as a
+delegate on the wrapped object by leveraging Python's __getattr__ feature.
+Attribute values and method call return values are recorded at each access.
+Objects returned by attributes and method invocations are wrapped in a
+MockProxy consistently.
+From a caller perspective, the MockProxy is completely transparent,
+with the exception of calls to the type(...) builtin function.
+
+At the end of the test, a mock is generated by each MockProxy by calling
+the get_mock() method. A mock is represented by an instance of the
+nova.tests.hyperv.mockproxy.Mock class.
+
+The Mock class's task is to replicate the behaviour of the mocked
+objects / modules by returning the same values in the same order, for example:
+
+def check_path(path):
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+check_path(path)
+# The second time os.path.exists returns True
+check_path(path)
+
+The injection of MockProxy / Mock instances is performed by the
+nova.tests.hyperv.basetestcase.BaseTestCase class in the setUp()
+method via selective monkey patching.
+Mocks are serialized in tearDown() during recording.
+
+The actual Hyper-V test case inherits from BaseTestCase:
+nova.tests.hyperv.test_hypervapi.HyperVAPITestCase
+
+
+Future directions:
+
+1) Replace the pickled files with a more generic serialization option (e.g. json)
+2) Add methods to statically extend the mocks (e.g. method call return values)
+3) Extend an existing framework, e.g. mox
diff --git a/nova/tests/hyperv/basetestcase.py b/nova/tests/hyperv/basetestcase.py
index 318cf2e28..c4f6cf95f 100644
--- a/nova/tests/hyperv/basetestcase.py
+++ b/nova/tests/hyperv/basetestcase.py
@@ -21,6 +21,7 @@ TestCase for MockProxy based tests and related classes.
import gzip
import os
import pickle
+import sys
from nova import test
from nova.tests.hyperv import mockproxy
@@ -42,9 +43,16 @@ class BaseTestCase(test.TestCase):
def tearDown(self):
super(BaseTestCase, self).tearDown()
- has_errors = len([test for (test, msgs) in self._currentResult.errors
+ # python-subunit will wrap test results with a decorator.
+ # Need to access the decorated member of results to get the
+ # actual test result when using python-subunit.
+ if hasattr(self._currentResult, 'decorated'):
+ result = self._currentResult.decorated
+ else:
+ result = self._currentResult
+ has_errors = len([test for (test, msgs) in result.errors
if test.id() == self.id()]) > 0
- failed = len([test for (test, msgs) in self._currentResult.failures
+ failed = len([test for (test, msgs) in result.failures
if test.id() == self.id()]) > 0
if not has_errors and not failed:
@@ -77,7 +85,8 @@ class BaseTestCase(test.TestCase):
not in ['true', 'yes', '1']:
m = self._load_mock(module_name)
else:
- module = __import__(module_name)
+ __import__(module_name)
+ module = sys.modules[module_name]
m = mockproxy.MockProxy(module)
self._mps[module_name] = m
return m
diff --git a/nova/tests/hyperv/db_fakes.py b/nova/tests/hyperv/db_fakes.py
index 9f5572fd1..16d894df8 100644
--- a/nova/tests/hyperv/db_fakes.py
+++ b/nova/tests/hyperv/db_fakes.py
@@ -19,6 +19,7 @@ Stubouts, mocks and fixtures for the test suite
"""
import time
+import uuid
from nova.compute import task_states
from nova.compute import vm_states
@@ -29,14 +30,20 @@ from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
- 'uuid': utils.gen_uuid(),
+ 'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
- 'instance_type': 'm1.tiny',
+ 'instance_type':
+ {'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 0,
+ 'flavorid': 1,
+ 'rxtx_factor': 1}
}
@@ -104,14 +111,20 @@ def stub_out_db_instance_api(stubs):
def __init__(self, values):
self.values = values
+ def get(self, key, default=None):
+ if key in self.values:
+ return self.values[key]
+ else:
+ return default
+
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
- if key in self.values:
- return self.values[key]
- else:
- raise NotImplementedError()
+ return self.get(key)
+
+ def __str__(self):
+ return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
@@ -119,12 +132,12 @@ def stub_out_db_instance_api(stubs):
if 'instance_type' not in values:
return
- type_data = INSTANCE_TYPES[values['instance_type']]
+ instance_type = values['instance_type']
base_options = {
'name': values['name'],
'id': values['id'],
- 'uuid': utils.gen_uuid(),
+ 'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
@@ -134,11 +147,11 @@ def stub_out_db_instance_api(stubs):
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
- 'instance_type': values['instance_type'],
- 'memory_mb': type_data['memory_mb'],
- 'vcpus': type_data['vcpus'],
+ 'instance_type': instance_type,
+ 'memory_mb': instance_type['memory_mb'],
+ 'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
- 'root_gb': type_data['root_gb'],
+ 'root_gb': instance_type['root_gb'],
}
return FakeModel(base_options)
@@ -160,7 +173,12 @@ def stub_out_db_instance_api(stubs):
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
+ def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
+ return {}
+
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
+ stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
index 7cf9f32fe..b71e60229 100644
--- a/nova/tests/hyperv/hypervutils.py
+++ b/nova/tests/hyperv/hypervutils.py
@@ -23,7 +23,7 @@ import time
from nova import exception
from nova.virt.hyperv import constants
-from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsV2
from xml.etree import ElementTree
# Check needed for unit testing on Unix
@@ -37,7 +37,9 @@ class HyperVUtils(object):
self.__conn_v2 = None
self.__conn_cimv2 = None
self.__conn_wmi = None
- self._volumeutils = volumeutils.VolumeUtils()
+ self.__conn_storage = None
+ self._volumeutils = volumeutilsV2.VolumeUtilsV2(
+ self._conn_storage, self._conn_wmi)
@property
def _conn(self):
@@ -63,6 +65,13 @@ class HyperVUtils(object):
self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
return self.__conn_wmi
+ @property
+ def _conn_storage(self):
+ if self.__conn_storage is None:
+ storage_namespace = '//./Root/Microsoft/Windows/Storage'
+ self.__conn_storage = wmi.WMI(moniker=storage_namespace)
+ return self.__conn_storage
+
def create_vhd(self, path):
image_service = self._conn.query(
"Select * from Msvm_ImageManagementService")[0]
@@ -78,7 +87,7 @@ class HyperVUtils(object):
% (path, ret_val))
def _check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
@@ -165,7 +174,13 @@ class HyperVUtils(object):
drive_path = hostResources[0]
volume_drives.append(drive_path)
- return (disk_files, volume_drives)
+ dvds = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Virtual CD/DVD Disk']
+ dvd_files = []
+ for dvd in dvds:
+ dvd_files.extend([c for c in dvd.Connection])
+
+ return (disk_files, volume_drives, dvd_files)
def remove_remote_vm(self, server, vm_name):
conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
@@ -181,7 +196,8 @@ class HyperVUtils(object):
#Stop the VM first.
self._set_vm_state(conn, vm_name, 3)
- (disk_files, volume_drives) = self._get_vm_disks(conn, vm_name)
+ (disk_files, volume_drives, dvd_files) = self._get_vm_disks(conn,
+ vm_name)
(job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
if ret_val == constants.WMI_JOB_STATUS_STARTED:
@@ -192,7 +208,7 @@ class HyperVUtils(object):
raise Exception(_('Failed to destroy vm %s') % vm_name)
#Delete associated vhd disk files.
- for disk in disk_files:
+ for disk in disk_files + dvd_files:
vhd_file = conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
@@ -203,7 +219,8 @@ class HyperVUtils(object):
def logout_iscsi_volume_sessions(self, volume_id):
target_iqn = self._get_target_iqn(volume_id)
- self._volumeutils.logout_storage_target(self._conn_wmi, target_iqn)
+ if (self.iscsi_volume_sessions_exist(volume_id)):
+ self._volumeutils.logout_storage_target(target_iqn)
def iscsi_volume_sessions_exist(self, volume_id):
target_iqn = self._get_target_iqn(volume_id)
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
index ff04ea709..513422c13 100644
--- a/nova/tests/hyperv/mockproxy.py
+++ b/nova/tests/hyperv/mockproxy.py
@@ -39,15 +39,25 @@ def serialize_obj(obj):
l1 = l1 + (serialize_obj(i1),)
val = str(l1)
else:
- val = str(obj)
+ if isinstance(obj, str) or isinstance(obj, unicode):
+ val = obj
+ elif hasattr(obj, '__str__') and inspect.ismethod(obj.__str__):
+ val = str(obj)
+ else:
+ val = str(type(obj))
return val
def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6"""
+ """Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
+class MockException(Exception):
+ def __init__(self, message):
+ super(MockException, self).__init__(message)
+
+
class Mock(object):
def _get_next_value(self, name):
c = self._access_count.get(name)
@@ -56,7 +66,13 @@ class Mock(object):
else:
c = c + 1
self._access_count[name] = c
- return self._values[name][c]
+
+ try:
+ value = self._values[name][c]
+ except IndexError as ex:
+ raise MockException(_('Couldn\'t find invocation num. %(c)d '
+ 'of attribute "%(name)s"') % locals())
+ return value
def _get_next_ret_value(self, name, params):
d = self._access_count.get(name)
@@ -69,7 +85,23 @@ class Mock(object):
else:
c = c + 1
d[params] = c
- return self._values[name][params][c]
+
+ try:
+ m = self._values[name]
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%s"') % (name))
+
+ try:
+ value = m[params][c]
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%(name)s" '
+ 'with arguments "%(params)s"') % locals())
+ except IndexError as ex:
+ raise MockException(_('Couldn\'t find invocation num. %(c)d '
+ 'of attribute "%(name)s" with arguments "%(params)s"')
+ % locals())
+
+ return value
def __init__(self, values):
self._values = values
@@ -82,7 +114,13 @@ class Mock(object):
if name.startswith('__') and name.endswith('__'):
return object.__getattribute__(self, name)
else:
- if isinstance(self._values[name], dict):
+ try:
+ isdict = isinstance(self._values[name], dict)
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%s"')
+ % (name))
+
+ if isdict:
def newfunc(*args, **kwargs):
params = serialize_args(args, kwargs)
return self._get_next_ret_value(name, params)
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
new file mode 100644
index 000000000..f968e2af5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..bd5ced9f8
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
index 009a2d45d..a48a21ca9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
index cb7818abb..c662b602a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
index d4005b336..6a692b3d8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
index 041d7314a..f2ae56be1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
index cab9cd580..2d24523aa 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
new file mode 100644
index 000000000..aca0d6f0c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..bbeec53df
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
index 0dfe439ca..3bf9bd13a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
index 17f83545d..62e3fa329 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
index 1ecf26961..36970348a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
index 1c68ad11e..8db997abf 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
index 7d4bae7a9..73f90ac2b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
index c1d101887..3ae9a6f46 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
index 2f30402a9..5b851f9b7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
new file mode 100644
index 000000000..7a1c47449
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..48583265e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
index 578b33da7..90d6a2ca6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
index 1da1b4dd0..3b17cc74f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
index 67798704f..162f52457 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
index 54585f18c..f88f8bc86 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
index 61ca098cb..f671dc247 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
new file mode 100644
index 000000000..37892d051
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..9aec45796
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
index 5f5a6c383..ffc21536e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
index 61c59ea1f..b47c49202 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
index 91252758c..78e4292b6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
index b06fd1371..5bc7602a8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
index c6e9722c2..9ba025e55 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
new file mode 100644
index 000000000..3341bca28
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..56cb9d103
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
index 809332508..81205e04d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
index d4b9d8921..9d1311341 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
index c6124e1e0..a151a99b4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
index 7b7110e06..b1d0b0f3a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
index 6c254032c..c2985c424 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
index 595510cff..2c4901c9f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz
new file mode 100644
index 000000000..2481a7b3e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
index 3f50a76e0..61cbc1854 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
new file mode 100644
index 000000000..09b86b24e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
index 35126ad4b..ba89bfd7e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
index 1a34569d1..cfce8c10a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
new file mode 100644
index 000000000..6092f36ab
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
index 805931bbb..010c07e56 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
index e9852d038..9d3adec48 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
new file mode 100644
index 000000000..995dde1b5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..12d18d12e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
index a292ad56e..64c756ffa 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
index bc29985bd..d2cefdc37 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
index 21812b0fa..9fdef3b90 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
index 13f51b856..c34d2308b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
index fca5d9328..36a342e7c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
index b082714cd..1af20acde 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
index 103f00b84..d84122d77 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
new file mode 100644
index 000000000..d650f40a5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..a03d442a4
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
index 3ab274719..993d9bb2d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
index 9d89a627d..6693c2ce9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
index 2c6fa5a22..07898dd55 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
index 9a54bbb62..56e583449 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
new file mode 100644
index 000000000..5d4c0e111
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..cb52cb974
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
index 0b6aff86d..8b2ff15f3 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
index 51331083e..aee1fb14d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
index fb5e35662..f926d206f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
index d8c75ba3c..483b23d53 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
index 92bbed9da..14d61039f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
index bb4535336..daecf0156 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
new file mode 100644
index 000000000..548b88148
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..8545a1833
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
index b2af3e865..c1daf3db9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
index 293c9b3cf..750d68d29 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
index b43ba2897..6e91b72a2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
index a1b757b60..2d0349d96 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
index f988eca93..6b9ef360a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
new file mode 100644
index 000000000..3e582226f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..723966011
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
index 4d53ded9b..29b73888b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
index 42e3d8f98..595124af2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
index e7728c515..03d53be74 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
index a970cc189..2a0663e6f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
index 6b3414f25..e651c02fc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
new file mode 100644
index 000000000..a50935649
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..4b07271c1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
index 11910aa8f..f62298ed7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
index a128ac167..12a164f23 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
index b56c849ea..33f1862e6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
index adf7b4648..80853eea4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
index 907cf2e25..5cebe527d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
new file mode 100644
index 000000000..d0c431b9d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..d231f803d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
index 81877dd6e..25fe5f3ff 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
index 33a72e90e..8be80ba56 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
index ff56a9287..51b6f2df8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
index 682dd6d40..97812405e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
index fba91bfff..20b2e021e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..c32f9ecd2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..672376a0e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
index 1578751ee..aa6f4ca8a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
index 987eeb6da..00f5770a7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
index 27495c884..1631f35df 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
index 80d62a9a4..ec28756ad 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
index 1ad1d60dc..699ccde76 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
new file mode 100644
index 000000000..2b99fb9cd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..a43bfeb7e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
index 3855ac0dd..57e74e618 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
index 8f1d273f2..273364d95 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
index 927204978..732a0f2e6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
index 849fd1c8c..d6cb32559 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
index 41aa8ccfb..e44197039 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..456af2816
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
index e69a69a20..93568dcef 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
index d5aa712ac..6a4b90850 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
index db090ad4d..fc816320f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
index ae76e5693..83cf9c071 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
index 8e4e9bd65..93977743f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..f58f80a79
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
index 991858501..18a8aed13 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
index f4a514e5c..4225a72b0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
index 3916fc0fb..363c431d4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
new file mode 100644
index 000000000..8761703dc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..fc907ed31
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
index de1f831de..0eca8e6ce 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
index 751668b6f..0886c942d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
index 922fce900..d0fb77bd1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
index c79c72334..df3961276 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
index 3cedfe1ba..4df451154 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..59724b43d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..4b3711ec0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
index 626398469..2f9a5de9c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
index 15a83ac0c..8ffa516c0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
index 755cf2e08..6aade88c6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
index d14db9b2f..276c06397 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
index 679287e3a..77a1650d4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
new file mode 100644
index 000000000..ce19ed290
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..b2dadcd4d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
index ed654b90e..aa378fedd 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
index 5b7ff554d..333a27b89 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
index d89b52377..16ca553f6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
index 764e6c45e..8cf3b564e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
index a63f4881a..0a2c8513b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
new file mode 100644
index 000000000..ae42d7734
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..4fec34d08
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
index 607047b38..74e8e95a6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
index 4f8b93282..da0528797 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
index 429a96d7e..63f02bc75 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
index ac9c25734..c014d5003 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
new file mode 100644
index 000000000..592658541
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..892f3c346
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
index 82b3a6185..9996339f5 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
index 741f28905..409ee5ef7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
index 5c633dc73..9e799c196 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
index da8c02d81..848024366 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
index 9e0baf1cd..687952c4c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
index f647f9516..57988a6b6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
new file mode 100644
index 000000000..303a47019
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..c211622e1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
new file mode 100644
index 000000000..5e5303cbc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
new file mode 100644
index 000000000..1bcbd48f3
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
new file mode 100644
index 000000000..ae557d73d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
new file mode 100644
index 000000000..90ebff4e7
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
new file mode 100644
index 000000000..beccc2737
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
new file mode 100644
index 000000000..af5082ab6
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..837d81b70
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
new file mode 100644
index 000000000..ecea62a01
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
new file mode 100644
index 000000000..283cd7fdd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
new file mode 100644
index 000000000..44dcc89ae
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
new file mode 100644
index 000000000..5c520c768
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
new file mode 100644
index 000000000..aec53305d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..a16c88e54
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..d9c4e9c82
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
index cd1356e9e..94aafb39a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
index 8add1aafc..e0ad00bf6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
index c889f9472..00f7839ba 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
index 20a8cad07..77422d3f5 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
index 9fec601ab..414194a9d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
new file mode 100644
index 000000000..b1e825822
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..1e3d89fea
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
new file mode 100644
index 000000000..627c78d7e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
new file mode 100644
index 000000000..e577cdb5e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
new file mode 100644
index 000000000..72962fc52
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
new file mode 100644
index 000000000..5d1351a14
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
new file mode 100644
index 000000000..eb0ed7241
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..c65264688
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..ca40d6413
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
index 4587a6fda..1d8081a3e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
index 48cb908c1..e03633b90 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
index 0d15a012e..00c56dacc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
index b0b49c932..7381c3cc6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
index 574ce071e..115ed1dd5 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
new file mode 100644
index 000000000..df40b08c0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..b51766f75
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
index c19b6e25e..092a1f933 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
index 1d655bb02..77f333c00 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
index 678b4cd10..8ab166a60 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
index 0884a350b..97e96be17 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
index 128b20ac5..728464ca9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
new file mode 100644
index 000000000..4aa6d171a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..df063a22e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
index bc4d4b99d..b30363fcc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
index 8de7c4e71..1681d9947 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
index ee94dd6c2..4469fd90e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
index 313bcfa06..f94f2ebb9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
index de8064431..03afe2235 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
new file mode 100644
index 000000000..2f95f62bf
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..2e7ab44ad
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
index e852140a1..eb514d086 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
index f89c63faf..810c9e14d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
index 12cda7550..2eb2a8372 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
index 07445af3e..67311757a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
index 8e21428f2..0779125b3 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..7e6cc708e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..0ce4bbf63
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
index 794d9a09d..9068792c7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
index 775f8232c..9b06cb884 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
index d0c0306f2..e91e6c965 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
index 3cb6c4b7f..271ded270 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
index a48d4aa9b..253bdfc82 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
new file mode 100644
index 000000000..20486b189
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..be92217ed
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
index 5578f64f8..36059e753 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
index 224ba464f..aea394e9f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
index 29c15fe82..4850d3cda 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
index 9ac16ec7d..99bf1806c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
index d6244c3fc..87b571e4a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index fa5e770f8..78cd667e4 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -16,24 +16,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""
import copy
import datetime
+import uuid
from nova import exception
-from nova import flags
import nova.image.glance
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova import utils
+CONF = cfg.CONF
+CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
@@ -53,8 +52,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
@@ -67,8 +66,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
@@ -80,8 +79,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': None,
'disk_format': None,
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
@@ -93,8 +92,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
@@ -120,8 +119,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
@@ -135,8 +134,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
@@ -178,7 +177,7 @@ class _FakeImageService(object):
:raises: Duplicate if the image already exist.
"""
- image_id = str(metadata.get('id', utils.gen_uuid()))
+ image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 13e090cef..7c13796a6 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -25,13 +25,17 @@ import glanceclient.exc
from nova import context
from nova import exception
from nova.image import glance
+from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
+from nova.tests import matchers
+
+CONF = cfg.CONF
class NullWriter(object):
- """Used to test ImageService.get which takes a writer object"""
+ """Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
@@ -130,7 +134,7 @@ class TestGlanceImageService(test.TestCase):
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
- """Ensure instance_id is persisted as an image-property"""
+ # Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
@@ -155,10 +159,10 @@ class TestGlanceImageService(test.TestCase):
'properties': {'instance_id': '42', 'user_id': 'fake'},
'owner': None,
}
- self.assertDictMatch(image_meta, expected)
+ self.assertThat(image_meta, matchers.DictMatches(expected))
image_metas = self.service.detail(self.context)
- self.assertDictMatch(image_metas[0], expected)
+ self.assertThat(image_metas[0], matchers.DictMatches(expected))
def test_create_without_instance_id(self):
"""
@@ -188,7 +192,7 @@ class TestGlanceImageService(test.TestCase):
'owner': None,
}
actual = self.service.show(self.context, image_id)
- self.assertDictMatch(actual, expected)
+ self.assertThat(actual, matchers.DictMatches(expected))
def test_create(self):
fixture = self._make_fixture(name='test image')
@@ -259,7 +263,7 @@ class TestGlanceImageService(test.TestCase):
'owner': None,
}
- self.assertDictMatch(meta, expected)
+ self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_limit(self):
@@ -315,7 +319,7 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'owner': None,
}
- self.assertDictMatch(meta, expected)
+ self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_invalid_marker(self):
@@ -702,3 +706,17 @@ class TestGlanceClientWrapper(test.TestCase):
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
+
+
+class TestGlanceUrl(test.TestCase):
+
+ def test_generate_glance_http_url(self):
+ generated_url = glance.generate_glance_url()
+ http_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
+ self.assertEqual(generated_url, http_url)
+
+ def test_generate_glance_https_url(self):
+ self.flags(glance_protocol="https")
+ generated_url = glance.generate_glance_url()
+ https_url = "https://%s:%d" % (CONF.glance_host, CONF.glance_port)
+ self.assertEqual(generated_url, https_url)
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
index 3c92ffb2e..4f8790cc7 100644
--- a/nova/tests/image/test_s3.py
+++ b/nova/tests/image/test_s3.py
@@ -21,6 +21,8 @@ import mox
import os
import tempfile
+import fixtures
+
from nova import context
import nova.db.api
from nova import exception
@@ -83,6 +85,7 @@ class TestS3ImageService(test.TestCase):
def setUp(self):
super(TestS3ImageService, self).setUp()
self.context = context.RequestContext(None, None)
+ self.useFixture(fixtures.FakeLogger('boto'))
# set up one fixture to test shows, should have id '1'
nova.db.api.s3_image_create(self.context,
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
new file mode 100644
index 000000000..3c7e80497
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "%(timestamp)s",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
new file mode 100644
index 000000000..a68b578e9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(image_id)s" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
new file mode 100644
index 000000000..fc10c3315
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
@@ -0,0 +1,214 @@
+{
+ "images": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "%(timestamp)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "created": "%(timestamp)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "created": "%(timestamp)s",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "OS-DCF:diskConfig": "MANUAL",
+ "created": "%(timestamp)s",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "created": "%(timestamp)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "%(id)s",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "created": "%(timestamp)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "created": "%(timestamp)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
new file mode 100644
index 000000000..b0d5218b5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage7" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage123456" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage123456" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage6" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s" OS-DCF:diskConfig="MANUAL">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage123456" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">%(id)s</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage123456" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="%(timestamp)s" name="fakeimage123456" created="%(timestamp)s" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
new file mode 100644
index 000000000..acb9bf531
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
new file mode 100644
index 000000000..52e1a5203
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
new file mode 100644
index 000000000..b239818a8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rebuild": {
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
new file mode 100644
index 000000000..93bfb0d4e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ OS-DCF:diskConfig="AUTO" />
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
new file mode 100644
index 000000000..1608b9f6b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
new file mode 100644
index 000000000..b760fe5eb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" adminPass="%(password)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
new file mode 100644
index 000000000..25e51a878
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
new file mode 100644
index 000000000..2671e2b51
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl
new file mode 100644
index 000000000..81b89adf2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "OS-DCF:diskConfig": "AUTO",
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
new file mode 100644
index 000000000..7c8371f16
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
new file mode 100644
index 000000000..1309e6dfe
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s" OS-DCF:diskConfig="AUTO">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
new file mode 100644
index 000000000..a290485e1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "resize": {
+ "flavorRef": "3",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
new file mode 100644
index 000000000..aa0b0b67a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<resize xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO"
+ flavorRef="3" />
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
new file mode 100644
index 000000000..4ac22820d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
new file mode 100644
index 000000000..808884698
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO" />
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
new file mode 100644
index 000000000..25e51a878
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
new file mode 100644
index 000000000..3d565b0b3
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s"
+ hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake"
+ tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s"
+ OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
new file mode 100644
index 000000000..48df845f1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s",
+ "OS-EXT-STS:task_state": null,
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(timestamp)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "OS-EXT-STS:vm_state": "active",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "OS-EXT-STS:power_state": 1,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
new file mode 100644
index 000000000..cf273a6bf
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1" >
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
new file mode 100644
index 000000000..8b97dc28d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
new file mode 100644
index 000000000..03bee03a6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
new file mode 100644
index 000000000..d1f78c08e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
new file mode 100644
index 000000000..8d992e42d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
new file mode 100644
index 000000000..cf5fb232a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
new file mode 100644
index 000000000..c7bdd1ca8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="%(flavor_id)s" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
index 3b421b4e1..3422de9f5 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
@@ -2,19 +2,19 @@
"flavor": {
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 0,
- "id": "1",
+ "id": "%(flavor_id)s",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/1",
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
"rel": "bookmark"
}
],
- "name": "m1.tiny",
+ "name": "%(flavor_name)s",
"ram": 512,
"vcpus": 1
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
index dfecff1fc..dc143010a 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
-</flavor> \ No newline at end of file
+<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
index ee7696bed..2c79efeaf 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
@@ -6,11 +6,11 @@
"id": "1",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "href": "%(host)s/v2/openstack/flavors/1",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/1",
+ "href": "%(host)s/openstack/flavors/1",
"rel": "bookmark"
}
],
@@ -24,11 +24,11 @@
"id": "2",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "href": "%(host)s/v2/openstack/flavors/2",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/2",
+ "href": "%(host)s/openstack/flavors/2",
"rel": "bookmark"
}
],
@@ -42,11 +42,11 @@
"id": "3",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "href": "%(host)s/v2/openstack/flavors/3",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/3",
+ "href": "%(host)s/openstack/flavors/3",
"rel": "bookmark"
}
],
@@ -60,11 +60,11 @@
"id": "4",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "href": "%(host)s/v2/openstack/flavors/4",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/4",
+ "href": "%(host)s/openstack/flavors/4",
"rel": "bookmark"
}
],
@@ -78,11 +78,11 @@
"id": "5",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "href": "%(host)s/v2/openstack/flavors/5",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/5",
+ "href": "%(host)s/openstack/flavors/5",
"rel": "bookmark"
}
],
@@ -91,4 +91,4 @@
"vcpus": 8
}
]
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
index 7b53621b7..7108ebd79 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
@@ -1,23 +1,23 @@
<?xml version='1.0' encoding='UTF-8'?>
<flavors xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
<flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
</flavor>
<flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
</flavor>
<flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
</flavor>
<flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
</flavor>
<flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" OS-FLV-EXT-DATA:ephemeral="0">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
</flavor>
-</flavors> \ No newline at end of file
+</flavors>
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
index b0e481a62..64385ad68 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
@@ -1,12 +1,12 @@
{
"flavor": {
- "OS-FLV-EXT-DATA:ephemeral": 30,
- "disk": 10,
- "id": "666",
- "name": "flavortest",
+ "name": "%(flavor_name)s",
"ram": 1024,
- "rxtx_factor": 2,
- "swap": 5,
- "vcpus": 2
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0,
+ "OS-FLV-EXT-DATA:ephemeral": 30,
+ "swap": 5
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
index ec1ec2e2b..df74ab383 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
@@ -1,11 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
- name="testflavor1"
+ name="%(flavor_name)s"
ram="1024"
vcpus="2"
disk="10"
- id="666"
+ id="%(flavor_id)s"
swap="5"
- rxtx_factor="2"
- OS-FLV-EXT-DATA:ephemeral="30" /> \ No newline at end of file
+ rxtx_factor="2.0"
+ OS-FLV-EXT-DATA:ephemeral="30" />
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
index 9eb2c77ff..3a92dabca 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
@@ -2,19 +2,19 @@
"flavor": {
"OS-FLV-EXT-DATA:ephemeral": 30,
"disk": 10,
- "id": "666",
+ "id": "%(flavor_id)s",
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/flavors/666",
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/flavors/666",
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
"rel": "bookmark"
}
],
- "name": "flavortest",
+ "name": "%(flavor_name)s",
"ram": 1024,
"vcpus": 2
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
index c877553a1..df7fc07a3 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="testflavor1" id="666" OS-FLV-EXT-DATA:ephemeral="30">
- <atom:link href="http://openstack.example.com/v2/openstack/flavors/666" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/flavors/666" rel="bookmark"/>
-</flavor> \ No newline at end of file
+<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" OS-FLV-EXT-DATA:ephemeral="30">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/README.rst b/nova/tests/integrated/api_samples/README.rst
index b4e274334..065df1d32 100644
--- a/nova/tests/integrated/api_samples/README.rst
+++ b/nova/tests/integrated/api_samples/README.rst
@@ -5,7 +5,7 @@ Samples in this directory are automatically generated from the api samples
integration tests. To regenerate the samples, simply set GENERATE_SAMPLES
in the environment before running the tests. For example:
- GENERATE_SAMPLES=True ./run_tests.py nova.tests.integrated
+ GENERATE_SAMPLES=True tox -epy27 nova.tests.integrated
If new tests are added or the .tpl files are changed due to bug fixes, the
samples should be regenerated so they are in sync with the templates.
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 9581a0e7e..fe0613646 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -73,6 +73,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-agents",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Agents",
+ "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-availability-zone",
"description": "%(text)s",
"links": [],
@@ -81,6 +89,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-cells",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Cells",
+ "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-certificates",
"description": "%(text)s",
"links": [],
@@ -97,6 +113,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-cloudpipe-update",
+ "description": "%(text)s",
+ "links": [],
+ "name": "CloudpipeUpdate",
+ "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-config-drive",
"description": "%(text)s",
"links": [],
@@ -121,6 +145,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-coverage",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Coverage",
+ "namespace": "http://docs.openstack.org/compute/ext/coverage/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-create-server-ext",
"description": "%(text)s",
"links": [],
@@ -137,6 +169,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-fixed-ips",
+ "description": "Fixed IPs support.",
+ "links": [],
+ "name": "FixedIPs",
+ "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2",
+ "updated": "2012-10-18T13:25:27-06:00"
+ },
+ {
"alias": "os-flavor-access",
"description": "%(text)s",
"links": [],
@@ -201,6 +241,22 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-floating-ips-bulk",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FloatingIpsBulk",
+ "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-hide-server-addresses",
+ "description": "Support hiding server addresses in certain states.",
+ "links": [],
+ "name": "HideServerAddresses",
+ "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1",
+ "updated": "2012-12-11T00:00:00+00:00"
+ },
+ {
"alias": "os-hosts",
"description": "%(text)s",
"links": [],
@@ -209,6 +265,22 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-services",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Services",
+ "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-fping",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Fping",
+ "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-hypervisors",
"description": "%(text)s",
"links": [],
@@ -245,7 +317,23 @@
"description": "%(text)s",
"links": [],
"name": "Networks",
- "namespace": "http://docs.openstack.org/compute/ext/networks/api/v1.1",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-tenant-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-networks-associate",
+ "description": "%(text)s",
+ "links": [],
+ "name": "NetworkAssociationSupport",
+ "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
"updated": "%(timestamp)s"
},
{
@@ -289,6 +377,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-server-password",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerPassword",
+ "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
+ "updated": "2012-11-29T00:00:00+00:00"
+ },
+ {
"alias": "os-server-start-stop",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index e8246aad8..2051d891a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -30,12 +30,21 @@
<extension alias="os-availability-zone" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
<description>%(text)s</description>
</extension>
+ <extension alias="os-agents" name="Agents" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" updated="%(timestamp)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-cells" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-certificates" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
<description>%(text)s</description>
</extension>
<extension alias="os-cloudpipe" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1" name="Cloudpipe">
<description>%(text)s</description>
</extension>
+ <extension alias="os-cloudpipe-update" updated="%(timestamp)s" name="CloudpipeUpdate" namespace="http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-config-drive" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/config_drive/api/v1.1" name="ConfigDrive">
<description>%(text)s</description>
</extension>
@@ -45,12 +54,18 @@
<extension alias="os-consoles" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-consoles/api/v2" name="Consoles">
<description>%(text)s</description>
</extension>
+ <extension alias="os-coverage" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/coverage/api/v2" name="Coverage">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-create-server-ext" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/createserverext/api/v1.1" name="Createserverext">
<description>%(text)s</description>
</extension>
<extension alias="os-deferred-delete" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
<description>%(text)s</description>
</extension>
+ <extension alias="os-fixed-ips" name="FixedIPs" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" updated="2012-10-18T13:25:27-06:00">
+ <description>Fixed IPs support.</description>
+ </extension>
<extension alias="os-flavor-access" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/flavor_access/api/v2" name="FlavorAccess">
<description>%(text)s</description>
</extension>
@@ -75,9 +90,21 @@
<extension alias="os-floating-ips" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/floating_ips/api/v1.1" name="FloatingIps">
<description>%(text)s</description>
</extension>
+ <extension alias="os-floating-ips-bulk" name="FloatingIpsBulk" namespace="http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2" updated="2012-10-29T13:25:27-06:00">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-hide-server-addresses" updated="2012-12-11T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1" name="HideServerAddresses">
+ <description>Support hiding server addresses in certain states.</description>
+ </extension>
<extension alias="os-hosts" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/hosts/api/v1.1" name="Hosts">
<description>%(text)s</description>
</extension>
+ <extension alias="os-services" name="Services" namespace="http://docs.openstack.org/compute/ext/services/api/v2" updated="%(timestamp)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-fping" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/fping/api/v1.1" name="Fping">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-hypervisors" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/hypervisors/api/v1.1" name="Hypervisors">
<description>%(text)s</description>
</extension>
@@ -90,7 +117,13 @@
<extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>%(text)s</description>
</extension>
- <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
+ <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-tenant-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>%(text)s</description>
</extension>
<extension alias="os-quota-class-sets" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
@@ -108,6 +141,9 @@
<extension alias="os-server-diagnostics" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1" name="ServerDiagnostics">
<description>%(text)s</description>
</extension>
+ <extension alias="os-server-password" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-server-start-stop" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl b/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
index 376352ec4..a86d5faa2 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
@@ -10,8 +10,6 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"maxSecurityGroups": 10,
"maxSecurityGroupRules": 20
},
@@ -68,6 +66,19 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(timestamp)s",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl b/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
index 25de4734a..6f92bcee6 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<rate regex=".*changes-since.*" uri="*changes-since*">
<limit next-available="%(timestamp)s" unit="MINUTE" verb="GET" remaining="3" value="3"/>
</rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(timestamp)s" unit="HOUR" verb="GET" remaining="12" value="12"/>
+ </rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
@@ -21,11 +24,9 @@
<limit name="maxPersonalitySize" value="10240"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="maxSecurityGroups" value="10"/>
<limit name="maxTotalCores" value="20"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits>
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl b/nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl
new file mode 100644
index 000000000..6dbd2f17c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "agent": {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl
new file mode 100644
index 000000000..5c777749a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <hypervisor>%(hypervisor)s</hypervisor>
+ <os>%(os)s</os>
+ <architecture>%(architecture)s</architecture>
+ <version>%(version)s</version>
+ <md5hash>%(md5hash)s</md5hash>
+ <url>%(url)s</url>
+</agent>
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl
new file mode 100644
index 000000000..abe83564f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "agent": {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s",
+ "agent_id": "%(agent_id)d"
+ }
+}
+
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
new file mode 100644
index 000000000..ecf97b91e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <url>%(url)s</url>
+ <hypervisor>%(hypervisor)s</hypervisor>
+ <md5hash>%(md5hash)s</md5hash>
+ <version>%(version)s</version>
+ <architecture>%(architecture)s</architecture>
+ <os>%(os)s</os>
+ <agent_id>%(agent_id)d</agent_id>
+</agent>
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl b/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
new file mode 100644
index 000000000..d447350e0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "para": {
+ "url": "%(url)s",
+ "md5hash": "%(md5hash)s",
+ "version": "%(version)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl b/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
new file mode 100644
index 000000000..19751dc80
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<para>
+ <version>%(version)s</version>
+ <url>%(url)s</url>
+ <md5hash>%(md5hash)s</md5hash>
+</para>
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
new file mode 100644
index 000000000..110e52cd3
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "agent": {
+ "agent_id": "%(agent_id)d",
+ "url": "%(url)s",
+ "md5hash": "%(md5hash)s",
+ "version": "%(version)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl b/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
new file mode 100644
index 000000000..2c9e50572
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <agent_id>%(agent_id)d</agent_id>
+ <version>%(version)s</version>
+ <url>%(url)s</url>
+ <md5hash>%(md5hash)s</md5hash>
+</agent>
diff --git a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl
new file mode 100644
index 000000000..dac1f76ff
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "agents": [
+ {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s",
+ "agent_id": "%(agent_id)d"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
new file mode 100644
index 000000000..fbbbdad28
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<agents>
+ <agent hypervisor="%(hypervisor)s" os="%(os)s" architecture="%(architecture)s" version="%(version)s" md5hash="%(md5hash)s" url="%(url)s" agent_id="%(agent_id)d"/>
+</agents>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
index 89a48ee57..119f78ad2 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova2"
+ },
"name": "newname",
"updated_at": "%(timestamp)s"
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
index 3f72a0b43..071e1c43a 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova2</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
index ee0ea6c3d..b311bb18e 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
@@ -8,7 +8,9 @@
"%(compute_host)s"
],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
index 82a0401ad..a45a01789 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
@@ -10,5 +10,7 @@
</hosts>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
index 8ce7d9c40..6b94465c4 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
index 56f0dd3e8..d59d10a84 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
index f373f02f7..bed47e730 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
@@ -7,7 +7,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
index 417b1016f..0a6173a0b 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
@@ -9,6 +9,8 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
</aggregates>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
index 058a1ecf5..f34932617 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
@@ -7,6 +7,7 @@
"hosts": [],
"id": 1,
"metadata": {
+ "availability_zone": "nova",
"key": "value"
},
"name": "name",
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
index 9bbd1f0bd..5b229cfc9 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
@@ -10,5 +10,6 @@
<id>1</id>
<metadata>
<key>value</key>
+ <availability_zone>nova</availability_zone>
</metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
index 8ce7d9c40..6b94465c4 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
@@ -6,7 +6,9 @@
"deleted_at": null,
"hosts": [],
"id": 1,
- "metadata": {},
+ "metadata": {
+ "availability_zone": "nova"
+ },
"name": "name",
"updated_at": null
}
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
index 56f0dd3e8..d59d10a84 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
@@ -8,5 +8,7 @@
<hosts/>
<deleted_at>None</deleted_at>
<id>1</id>
- <metadata/>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
</aggregate>
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl
new file mode 100644
index 000000000..2993b1df8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
new file mode 100644
index 000000000..d31a674a2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
new file mode 100644
index 000000000..b16e12cd6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
@@ -0,0 +1,4 @@
+{
+ "cells": []
+}
+
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
new file mode 100644
index 000000000..32fef4f04
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/>
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl
new file mode 100644
index 000000000..3d7a6c207
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "username": "username1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell2",
+ "username": "username2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell4",
+ "username": "username4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell5",
+ "username": "username5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
new file mode 100644
index 000000000..58312201f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <cell name="cell1" username="username1" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell2" username="username2" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell4" username="username4" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell5" username="username5" rpc_port="None" rpc_host="None" type="child"/>
+</cells>
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl b/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
new file mode 100644
index 000000000..0ab9141ae
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "%(vpn_ip)s",
+ "vpn_port": "%(vpn_port)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl b/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
new file mode 100644
index 000000000..34d2be9df
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<configure_project>
+ <vpn_ip>%(vpn_ip)s</vpn_ip>
+ <vpn_port>%(vpn_port)s</vpn_port>
+</configure_project>
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
new file mode 100644
index 000000000..1926119ce
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
new file mode 100644
index 000000000..c1f73180e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getVNCConsole>
+ <type>novnc</type>
+</os-getVNCConsole>
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
new file mode 100644
index 000000000..3cf725575
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url":"%(url)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
new file mode 100644
index 000000000..d4904aa9a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>novnc</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.json.tpl
new file mode 100644
index 000000000..c228b05f0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "report" : {
+ "file" : "%(filename)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.xml.tpl
new file mode 100644
index 000000000..02b8fc97c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<report>
+ <file>%(filename)s</file>
+</report>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.json.tpl
new file mode 100644
index 000000000..6cdd1f37d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "path" : "%(path)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.xml.tpl
new file mode 100644
index 000000000..51b9773d3
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-report-post-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<path>%(path)s</path>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.json.tpl
new file mode 100644
index 000000000..296049df9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "start" : {
+ "combine": true
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.xml.tpl
new file mode 100644
index 000000000..11bdb44f9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-start-combine-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<start>
+ <combine>True</combine>
+</start>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.json.tpl
new file mode 100644
index 000000000..a046b9207
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "start" : {
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.xml.tpl
new file mode 100644
index 000000000..aeaff102a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-start-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<start></start>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.json.tpl
new file mode 100644
index 000000000..c46e49cca
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "stop" : {
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.xml.tpl
new file mode 100644
index 000000000..1dd5c7bc7
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<stop></stop>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.json.tpl
new file mode 100644
index 000000000..6cdd1f37d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "path" : "%(path)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.xml.tpl
new file mode 100644
index 000000000..65f5e16bc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-stop-post-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<path>%(path)s</path>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.json.tpl
new file mode 100644
index 000000000..906d629ed
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "report": {
+ "xml": true,
+ "file": "%(filename)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.xml.tpl
new file mode 100644
index 000000000..902e0e537
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<report>
+ <file>%(filename)s</file>
+ <xml>True</xml>
+</report>
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.json.tpl
new file mode 100644
index 000000000..6cdd1f37d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "path" : "%(path)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.xml.tpl
new file mode 100644
index 000000000..65f5e16bc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-coverage/coverage-xml-report-post-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<path>%(path)s</path>
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
new file mode 100644
index 000000000..d3562d390
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
new file mode 100644
index 000000000..31928207e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<forceDelete />
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
new file mode 100644
index 000000000..d38291fe0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "restore": null
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
new file mode 100644
index 000000000..8a95b4fcc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<restore />
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
new file mode 100644
index 000000000..85ae4890a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reserve": "%(reserve)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
new file mode 100644
index 000000000..3896b24eb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reserve>%(reserve)s</reserve>
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
new file mode 100644
index 000000000..a3d11475b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "cidr": "%(cidr)s",
+ "hostname": "%(hostname)s",
+ "host": "%(host)s",
+ "address": "%(address)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
new file mode 100644
index 000000000..3e9598f34
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<fixed_ip>
+ <cidr>%(cidr)s</cidr>
+ <hostname>%(hostname)s</hostname>
+ <host>%(host)s</host>
+ <address>%(address)s</address>
+</fixed_ip>
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
new file mode 100644
index 000000000..5383e5d15
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
new file mode 100644
index 000000000..764cebe8e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor>
+ <name>%(flavor_name)s</name>
+ <ram>1024</ram>
+ <vcpus>2</vcpus>
+ <disk>10</disk>
+ <id>%(flavor_id)s</id>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
new file mode 100644
index 000000000..ae0ce80ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
new file mode 100644
index 000000000..156ef215e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
new file mode 100644
index 000000000..a7f3a1993
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
new file mode 100644
index 000000000..4feec740c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
new file mode 100644
index 000000000..5b27e1385
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
new file mode 100644
index 000000000..fb2ee09e0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
new file mode 100644
index 000000000..70d0a57de
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
new file mode 100644
index 000000000..a87b47670
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ rxtx_factor="2.0" />
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
new file mode 100644
index 000000000..abf652fae
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
new file mode 100644
index 000000000..d24623c55
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" rxtx_factor="2.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
new file mode 100644
index 000000000..be9afe012
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 0,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
new file mode 100644
index 000000000..53afae086
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="0" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
new file mode 100644
index 000000000..c46a1695d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
new file mode 100644
index 000000000..ced8e1779
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="0" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
new file mode 100644
index 000000000..ca86aeb4e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "swap": 5
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
new file mode 100644
index 000000000..5f54df5cd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ swap="5" />
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
new file mode 100644
index 000000000..e61a08dc1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
new file mode 100644
index 000000000..e8c69ecee
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" swap="5">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
new file mode 100644
index 000000000..676859de9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "floating_ips_bulk_create" :
+ {
+ "ip_range": "%(ip_range)s",
+ "pool": "%(pool)s",
+ "interface": "%(interface)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
new file mode 100644
index 000000000..ebe0b9aa9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+<ip_range>%(ip_range)s</ip_range>
+<pool>%(pool)s</pool>
+<interface>%(interface)s</interface>
+</floating_ips_bulk_create>
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
new file mode 100644
index 000000000..ef1cbfb17
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "floating_ips_bulk_create": {
+ "interface": "eth0",
+ "ip_range": "192.168.1.0/24",
+ "pool": "nova"
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
new file mode 100644
index 000000000..db80bbfc1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+ <interface>eth0</interface>
+ <ip_range>192.168.1.0/24</ip_range>
+ <pool>nova</pool>
+</floating_ips_bulk_create> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
new file mode 100644
index 000000000..d630d669c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "ip_range": "%(ip_range)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
new file mode 100644
index 000000000..27a6b0e95
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<ip_range>%(ip_range)s</ip_range>
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
new file mode 100644
index 000000000..166984b24
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "floating_ips_bulk_delete": "192.168.1.0/24"
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
new file mode 100644
index 000000000..3d77af334
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_delete>192.168.1.0/24</floating_ips_bulk_delete> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
new file mode 100644
index 000000000..0eaaf75ae
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
new file mode 100644
index 000000000..4c3c8cd9c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
new file mode 100644
index 000000000..de1e622bb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
@@ -0,0 +1,25 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.1",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.2",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
new file mode 100644
index 000000000..6ef85bd87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.1</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.2</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
new file mode 100644
index 000000000..86e39aedc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
new file mode 100644
index 000000000..adc8a5c1b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
new file mode 100644
index 000000000..e244ea0df
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
new file mode 100644
index 000000000..568807ecb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
new file mode 100644
index 000000000..8b97dc28d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
new file mode 100644
index 000000000..03bee03a6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
new file mode 100644
index 000000000..4ed89a182
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "reboot"
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
new file mode 100644
index 000000000..4f9c8e437
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="reboot"/>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl
new file mode 100644
index 000000000..efb234b43
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "host": [
+ {
+ "resource": {
+ "cpu": 1,
+ "disk_gb": 1028,
+ "host": "%(host_name)s",
+ "memory_mb": 8192,
+ "project": "(total)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 512,
+ "project": "(used_now)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 0,
+ "project": "(used_max)"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
new file mode 100644
index 000000000..e162734ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host>
+ <resource>
+ <project>(total)</project>
+ <memory_mb>8192</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>1</cpu>
+ <disk_gb>1028</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_now)</project>
+ <memory_mb>512</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_max)</project>
+ <memory_mb>0</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+</host>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
new file mode 100644
index 000000000..c0df4481a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "shutdown"
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
new file mode 100644
index 000000000..d78bd32a5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="shutdown"/>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl
new file mode 100644
index 000000000..90f5ac7bc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "startup"
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
new file mode 100644
index 000000000..581f7cf07
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="startup"/>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
new file mode 100644
index 000000000..6accac164
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "status": "enable",
+ "maintenance_mode": "disable"
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
new file mode 100644
index 000000000..d127a7a26
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<updates>
+ <status>enable</status>
+ <maintenance_mode>disable</maintenance_mode>
+</updates>
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
new file mode 100644
index 000000000..92f73892b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "host": "%(host_name)s",
+ "maintenance_mode": "off_maintenance",
+ "status": "enabled"
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
new file mode 100644
index 000000000..e9c99512b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host status="enabled" maintenance_mode="off_maintenance" host="%(host_name)s"/>
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
new file mode 100644
index 000000000..504f66f59
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "hosts": [
+ {
+ "host_name": "%(host_name)s",
+ "service": "compute",
+ "zone": "nova"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cert",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "network",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "scheduler",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "conductor",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cells",
+ "zone": "internal"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
new file mode 100644
index 000000000..4e9d3195d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hosts>
+ <host host_name="%(host_name)s" service="compute"/>
+ <host host_name="%(host_name)s" service="cert"/>
+ <host host_name="%(host_name)s" service="network"/>
+ <host host_name="%(host_name)s" service="scheduler"/>
+ <host host_name="%(host_name)s" service="conductor"/>
+ <host host_name="%(host_name)s" service="cells"/>
+</hosts>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
new file mode 100644
index 000000000..762e88175
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "associate_host": "%(host)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
new file mode 100644
index 000000000..7c96c96a1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<associate_host>%(host)s</associate_host>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
new file mode 100644
index 000000000..46f69b3e8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
new file mode 100644
index 000000000..910504a44
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_host/>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
new file mode 100644
index 000000000..63b6eb683
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
new file mode 100644
index 000000000..d4162c19e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_project/>
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
new file mode 100644
index 000000000..2e09d15a6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+}
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
new file mode 100644
index 000000000..c26f7b61a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate/>
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
new file mode 100644
index 000000000..4b430ad7c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_class_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "%(set_id)s",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
new file mode 100644
index 000000000..3dffd47f0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set id="%(set_id)s">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
new file mode 100644
index 000000000..f074c829f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
@@ -0,0 +1,15 @@
+{
+ "quota_class_set": {
+ "instances": 50,
+ "cores": 50,
+ "ram": 51200,
+ "floating_ips": 10,
+ "metadata_items": 128,
+ "injected_files": 5,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "security_groups": 10,
+ "security_group_rules": 20,
+ "key_pairs": 100
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
new file mode 100644
index 000000000..d14785482
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
new file mode 100644
index 000000000..99a11f4ff
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "quota_class_set": {
+ "cores": 50,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 50,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
new file mode 100644
index 000000000..44c658a41
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
new file mode 100644
index 000000000..ee1f6a397
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
new file mode 100644
index 000000000..6a39c8506
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
new file mode 100644
index 000000000..ee1f6a397
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
new file mode 100644
index 000000000..6a39c8506
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
new file mode 100644
index 000000000..1f12caa04
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
new file mode 100644
index 000000000..596ce56ac
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
new file mode 100644
index 000000000..c16dc6bb5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
new file mode 100644
index 000000000..126c3fced
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl
new file mode 100644
index 000000000..026f15d46
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "password": "%(encrypted_password)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
new file mode 100644
index 000000000..046eed30f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<password>%(encrypted_password)s</password>
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
new file mode 100644
index 000000000..757084d2f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "%(id)s",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "%(id)s",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
new file mode 100644
index 000000000..fb1c2d3d0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "network": {
+ "label": "public",
+ "cidr": "172.0.0.0/24",
+ "vlan_start": 1,
+ "num_networks": 1,
+ "network_size": 255
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
new file mode 100644
index 000000000..ff9e2273d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "%(id)s",
+ "label": "public"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
index 9c927eeef..d83dd87c3 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
@@ -12,15 +12,11 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
- "totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
- "totalVolumeGigabytesUsed": 0,
- "totalVolumesUsed": 0
+ "totalFloatingIpsUsed": 0
},
"rate": [
{
@@ -75,6 +71,19 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(timestamp)s",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
index a11a577d0..c1b907670 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<rate regex=".*changes-since.*" uri="*changes-since*">
<limit next-available="%(timestamp)s" unit="MINUTE" verb="GET" remaining="3" value="3"/>
</rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(timestamp)s" unit="HOUR" verb="GET" remaining="12" value="12"/>
+ </rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
@@ -19,20 +22,16 @@
<limit name="maxPersonality" value="5"/>
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
- <limit name="totalVolumesUsed" value="0"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
<limit name="totalCoresUsed" value="0"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="totalRAMUsed" value="0"/>
<limit name="totalInstancesUsed" value="0"/>
<limit name="maxSecurityGroups" value="10"/>
- <limit name="totalVolumeGigabytesUsed" value="0"/>
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="totalKeyPairsUsed" value="0"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits>
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index b1b2c076e..f17dc025f 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -21,18 +21,22 @@ Provides common functionality for integrated unit tests
import random
import string
+import uuid
import nova.image.glance
+from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova import service
-from nova import test # For the flags
+from nova import test
from nova.tests import fake_crypto
import nova.tests.image.fake
from nova.tests.integrated.api import client
-from nova import utils
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
def generate_random_alphanumeric(length):
@@ -67,16 +71,19 @@ class _IntegratedTestBase(test.TestCase):
self.flags(**f)
self.flags(verbose=True)
- self.stub_module('crypto', fake_crypto)
+ self.useFixture(test.ReplaceModule('crypto', fake_crypto))
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.flags(scheduler_driver='nova.scheduler.'
'chance.ChanceScheduler')
# set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('cert')
self.network = self.start_service('network')
self.scheduler = self.start_service('scheduler')
+ self.cells = self.start_service('cells', manager=CONF.cells.manager)
self._start_api_service()
@@ -116,7 +123,7 @@ class _IntegratedTestBase(test.TestCase):
return generate_new_element(server_names, 'server')
def get_invalid_image(self):
- return str(utils.gen_uuid())
+ return str(uuid.uuid4())
def _build_minimal_create_server_request(self):
server = {}
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 81c8e91c0..7c3157872 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -15,30 +15,47 @@
import base64
import datetime
+import inspect
+import json
import os
import re
import urllib
-import uuid
+import uuid as uuid_lib
+from coverage import coverage
from lxml import etree
+from nova.api.metadata import password
+from nova.api.openstack.compute.contrib import coverage_ext
+# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cloudpipe.pipelib import CloudPipe
-from nova.compute import api
from nova import context
from nova import db
-from nova import flags
-from nova.network.manager import NetworkManager
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.network import api as network_api
+from nova.network import manager as network_manager
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common.log import logging
from nova.openstack.common import timeutils
+import nova.quota
from nova.scheduler import driver
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
LOG = logging.getLogger(__name__)
@@ -68,6 +85,9 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
indent=4)
else:
+ if data is None:
+ # Likely from missing XML file.
+ return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
@@ -121,9 +141,10 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
+
template = self._get_template(name)
if self.generate_samples and not os.path.exists(template):
- with open(template, 'w') as outf:
+ with open(template, 'w'):
pass
with open(template) as inf:
return inf.read().strip()
@@ -198,14 +219,52 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
'%(expected)s\n%(result)s') % locals())
return matched_value
+ def _verify_something(self, subs, expected, data):
+ result = self._pretty_data(data)
+ result = self._objectify(result)
+ return self._compare_result(subs, expected, result)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ This may be needed by some tests to convert exact matches expected
+ from the server into pattern matches to verify what is in the
+ sample file.
+
+ If there are no changes to be made, subs is returned unharmed.
+ """
+ return subs
+
def _verify_response(self, name, subs, response):
expected = self._read_template(name)
expected = self._objectify(expected)
- result = self._pretty_data(response.read())
- if self.generate_samples:
- self._write_sample(name, result)
- result = self._objectify(result)
- return self._compare_result(subs, expected, result)
+ response_data = response.read()
+ try:
+ with file(self._get_sample(name)) as sample:
+ sample_data = sample.read()
+ except IOError:
+ if self.ctype == 'json':
+ sample_data = "{}"
+ else:
+ sample_data = None
+
+ try:
+ response_result = self._verify_something(subs, expected,
+ response_data)
+ # NOTE(danms): replace some of the subs with patterns for the
+ # doc/api_samples check, which won't have things like the
+ # correct compute host name. Also let the test do some of its
+ # own generalization, if necessary
+ vanilla_regexes = self._get_regexes()
+ subs['compute_host'] = vanilla_regexes['host_name']
+ subs['id'] = vanilla_regexes['id']
+ subs = self.generalize_subs(subs, vanilla_regexes)
+ self._verify_something(subs, expected, sample_data)
+ return response_result
+ except NoMatch:
+ if self.generate_samples:
+ self._write_sample(name, self._pretty_data(response_data))
+ raise
def _get_host(self):
return 'http://openstack.example.com'
@@ -243,6 +302,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:'
# '[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}',
'host': self._get_host(),
+ 'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
@@ -260,8 +320,9 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
def _do_post(self, url, name, subs, method='POST'):
body = self._read_template(name) % subs
- if self.generate_samples:
- self._write_sample(name, body)
+ sample = self._get_sample(name)
+ if self.generate_samples and not os.path.exists(sample):
+ self._write_sample(name, body)
return self._get_response(url, method, body)
def _do_put(self, url, name, subs):
@@ -271,6 +332,71 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
return self._get_response(url, 'DELETE')
+class ApiSamplesTrap(ApiSampleTestBase):
+ """Make sure extensions don't get added without tests."""
+
+ all_extensions = True
+
+ def _get_extensions_tested(self):
+ tests = []
+ for attr in globals().values():
+ if not inspect.isclass(attr):
+ continue # Skip non-class objects
+ if not issubclass(attr, integrated_helpers._IntegratedTestBase):
+ continue # Skip non-test classes
+ if attr.extension_name is None:
+ continue # Skip base tests
+ cls = importutils.import_class(attr.extension_name)
+ tests.append(cls.alias)
+ return tests
+
+ def _get_extensions(self):
+ extensions = []
+ response = self._do_get('extensions')
+ for extension in jsonutils.loads(response.read())['extensions']:
+ extensions.append(str(extension['alias']))
+ return extensions
+
+ def test_all_extensions_have_samples(self):
+ # NOTE(danms): This is a list of extensions which are currently
+ # in the tree but that don't (yet) have tests. This list should
+ # NOT be allowed to grow, and should shrink to zero (and be
+ # removed) soon.
+ do_not_approve_additions = []
+ do_not_approve_additions.append('NMN')
+ do_not_approve_additions.append('os-config-drive')
+ do_not_approve_additions.append('os-create-server-ext')
+ do_not_approve_additions.append('os-flavor-access')
+ do_not_approve_additions.append('os-flavor-extra-specs')
+ do_not_approve_additions.append('os-floating-ip-dns')
+ do_not_approve_additions.append('os-floating-ip-pools')
+ do_not_approve_additions.append('os-fping')
+ do_not_approve_additions.append('os-hypervisors')
+ do_not_approve_additions.append('os-instance_usage_audit_log')
+ do_not_approve_additions.append('os-networks')
+ do_not_approve_additions.append('os-services')
+ do_not_approve_additions.append('os-volumes')
+
+ tests = self._get_extensions_tested()
+ extensions = self._get_extensions()
+ missing_tests = []
+ for extension in extensions:
+ # NOTE(danms): if you add tests, remove it from the
+ # exclusions list
+ self.assertFalse(extension in do_not_approve_additions and
+ extension in tests)
+
+ # NOTE(danms): if you add an extension, it must come with
+ # api_samples tests!
+ if (extension not in tests and
+ extension not in do_not_approve_additions):
+ missing_tests.append(extension)
+
+ if missing_tests:
+ LOG.error("Extensions are missing tests: %s" % missing_tests)
+ self.assertEqual(missing_tests, [])
+
+
class VersionsSampleJsonTest(ApiSampleTestBase):
def test_versions_get(self):
response = self._do_get('', strip_version=True)
@@ -338,6 +464,16 @@ class ServersSampleAllExtensionXmlTest(ServersSampleXmlTest):
all_extensions = True
+class ServersSampleHideAddressesJsonTest(ServersSampleJsonTest):
+ extension_name = '.'.join(('nova.api.openstack.compute.contrib',
+ 'hide_server_addresses',
+ 'Hide_server_addresses'))
+
+
+class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
+ ctype = 'xml'
+
+
class ServersMetadataJsonTest(ServersSampleBase):
def _create_and_set(self, subs):
uuid = self._post_server()
@@ -349,13 +485,17 @@ class ServersMetadataJsonTest(ServersSampleBase):
return uuid
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['value'] = '(Foo|Bar) Value'
+ return subs
+
def test_metadata_put_all(self):
- """Test setting all metadata for a server"""
+ # Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
return self._create_and_set(subs)
def test_metadata_post_all(self):
- """Test updating all metadata for a server"""
+ # Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -366,7 +506,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_get_all(self):
- """Test getting all metadata for a server"""
+ # Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
@@ -374,7 +514,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_put(self):
- """Test putting an individual metadata item for a server"""
+ # Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -385,7 +525,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_get(self):
- """Test getting an individual metadata item for a server"""
+ # Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
@@ -393,7 +533,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_delete(self):
- """Test deleting an individual metadata item for a server"""
+ # Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
@@ -407,14 +547,14 @@ class ServersMetadataXmlTest(ServersMetadataJsonTest):
class ServersIpsJsonTest(ServersSampleBase):
def test_get(self):
- """Test getting a server's IP information"""
+ # Test getting a server's IP information.
uuid = self._post_server()
response = self._do_get('servers/%s/ips' % uuid)
subs = self._get_regexes()
return self._verify_response('server-ips-resp', subs, response)
def test_get_by_network(self):
- """Test getting a server's IP information by network id"""
+ # Test getting a server's IP information by network id.
uuid = self._post_server()
response = self._do_get('servers/%s/ips/private' % uuid)
subs = self._get_regexes()
@@ -455,6 +595,52 @@ class FlavorsSampleXmlTest(FlavorsSampleJsonTest):
ctype = 'xml'
+class HostsSampleJsonTest(ApiSampleTestBase):
+ extension_name = "nova.api.openstack.compute.contrib.hosts.Hosts"
+
+ def test_host_startup(self):
+ response = self._do_get('os-hosts/%s/startup' % self.compute.host)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('host-get-startup', subs, response)
+
+ def test_host_reboot(self):
+ response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('host-get-reboot', subs, response)
+
+ def test_host_shutdown(self):
+ response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('host-get-shutdown', subs, response)
+
+ def test_host_maintenance(self):
+ response = self._do_put('os-hosts/%s' % self.compute.host,
+ 'host-put-maintenance-req', {})
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('host-put-maintenance-resp', subs,
+ response)
+
+ def test_host_get(self):
+ response = self._do_get('os-hosts/%s' % self.compute.host)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('host-get-resp', subs, response)
+
+ def test_hosts_list(self):
+ response = self._do_get('os-hosts')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('hosts-list-resp', subs, response)
+
+
+class HostsSampleXmlTest(HostsSampleJsonTest):
+ ctype = 'xml'
+
+
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
all_extensions = True
@@ -465,13 +651,13 @@ class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
class ImagesSampleJsonTest(ApiSampleTestBase):
def test_images_list(self):
- """Get api sample of images get list request"""
+ # Get api sample of images get list request.
response = self._do_get('images')
subs = self._get_regexes()
return self._verify_response('images-list-get-resp', subs, response)
def test_image_get(self):
- """Get api sample of one single image details request"""
+ # Get api sample of one single image details request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
@@ -480,13 +666,13 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-get-resp', subs, response)
def test_images_details(self):
- """Get api sample of all images details request"""
+ # Get api sample of all images details request.
response = self._do_get('images/detail')
subs = self._get_regexes()
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
- """Get api sample of a image metadata request"""
+ # Get api sample of a image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
@@ -494,7 +680,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-metadata-get-resp', subs, response)
def test_image_metadata_post(self):
- """Get api sample to update metadata of an image metadata request"""
+ # Get api sample to update metadata of an image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_post(
'images/%s/metadata' % image_id,
@@ -505,7 +691,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_metadata_put(self):
- """Get api sample of image metadata put request"""
+ # Get api sample of image metadata put request.
image_id = fake.get_valid_image_id()
response = self._do_put('images/%s/metadata' % image_id,
'image-metadata-put-req', {})
@@ -515,7 +701,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_meta_key_get(self):
- """Get api sample of a image metadata key request"""
+ # Get api sample of a image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
@@ -523,7 +709,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-meta-key-get', subs, response)
def test_image_meta_key_put(self):
- """Get api sample of image metadata key put request"""
+ # Get api sample of image metadata key put request.
image_id = fake.get_valid_image_id()
key = "auto_disk_config"
response = self._do_put('images/%s/metadata/%s' % (image_id, key),
@@ -550,6 +736,79 @@ class LimitsSampleXmlTest(LimitsSampleJsonTest):
ctype = 'xml'
+class CoverageExtJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.coverage_ext."
+ "Coverage_ext")
+
+ def setUp(self):
+ super(CoverageExtJsonTests, self).setUp()
+
+ def _fake_check_coverage(self):
+ return False
+
+ def _fake_xml_report(self, outfile=None):
+ return
+
+ self.stubs.Set(coverage_ext.CoverageController, '_check_coverage',
+ _fake_check_coverage)
+ self.stubs.Set(coverage, 'xml_report', _fake_xml_report)
+
+ def test_start_coverage(self):
+ # Start coverage data collection.
+ subs = {}
+ response = self._do_post('os-coverage/action',
+ 'coverage-start-post-req', subs)
+ self.assertEqual(response.status, 200)
+
+ def test_start_coverage_combine(self):
+ # Start coverage data collection.
+ subs = {}
+ response = self._do_post('os-coverage/action',
+ 'coverage-start-combine-post-req', subs)
+ self.assertEqual(response.status, 200)
+
+ def test_stop_coverage(self):
+ # Stop coverage data collection.
+ subs = {
+ 'path': '/.*',
+ }
+ response = self._do_post('os-coverage/action',
+ 'coverage-stop-post-req', subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('coverage-stop-post-resp',
+ subs, response)
+
+ def test_report_coverage(self):
+ # Generate a coverage report.
+ subs = {
+ 'filename': 'report',
+ 'path': '/.*/report',
+ }
+ response = self._do_post('os-coverage/action',
+ 'coverage-report-post-req', subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('coverage-report-post-resp',
+ subs, response)
+
+ def test_xml_report_coverage(self):
+ subs = {
+ 'filename': 'report',
+ 'path': '/.*/report',
+ }
+ response = self._do_post('os-coverage/action',
+ 'coverage-xml-report-post-req', subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('coverage-xml-report-post-resp',
+ subs, response)
+
+
+class CoverageExtXmlTests(CoverageExtJsonTests):
+ ctype = "xml"
+
+
class ServersActionsJsonTest(ServersSampleBase):
def _test_server_action(self, uuid, action,
subs={}, resp_tpl=None, code=202):
@@ -591,7 +850,7 @@ class ServersActionsJsonTest(ServersSampleBase):
'server-action-rebuild-resp')
def test_server_resize(self):
- FLAGS.allow_resize_to_same_host = True
+ self.flags(allow_resize_to_same_host=True)
uuid = self._post_server()
self._test_server_action(uuid, "resize",
{"id": 2,
@@ -679,30 +938,41 @@ class FlavorsExtraDataJsonTest(ApiSampleTestBase):
def _get_flags(self):
f = super(FlavorsExtraDataJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = FLAGS.osapi_compute_extension[:]
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Flavorextradata extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
return f
def test_flavors_extra_data_get(self):
- response = self._do_get('flavors/1')
- subs = self._get_regexes()
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
return self._verify_response('flavors-extra-data-get-resp', subs,
response)
def test_flavors_extra_data_list(self):
response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('flavors-extra-data-list-resp', subs,
response)
- def test_flavors_extra_data_post(self):
+ def test_flavors_extra_data_create(self):
+ subs = {
+ 'flavor_id': 666,
+ 'flavor_name': 'flavortest'
+ }
response = self._do_post('flavors',
'flavors-extra-data-post-req',
- {})
+ subs)
self.assertEqual(response.status, 200)
- subs = self._get_regexes()
+ subs.update(self._get_regexes())
return self._verify_response('flavors-extra-data-post-resp',
subs, response)
@@ -711,6 +981,104 @@ class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
ctype = 'xml'
+class FlavorRxtxJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
+ 'Flavor_rxtx')
+
+ def _get_flags(self):
+ f = super(FlavorRxtxJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorRxtx extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_rxtx_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-rxtx-get-resp', subs,
+ response)
+
+ def test_flavors_rxtx_list(self):
+ response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('flavor-rxtx-list-resp', subs,
+ response)
+
+ def test_flavors_rxtx_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-rxtx-post-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-rxtx-post-resp',
+ subs, response)
+
+
+class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
+ ctype = 'xml'
+
+
+class FlavorSwapJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
+ 'Flavor_swap')
+
+ def _get_flags(self):
+ f = super(FlavorSwapJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorSwap extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_swap_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-get-resp', subs,
+ response)
+
+ def test_flavor_swap_list(self):
+ response = self._do_get('flavors/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('flavor-swap-list-resp', subs,
+ response)
+
+ def test_flavor_swap_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-swap-post-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('flavor-swap-post-resp',
+ subs, response)
+
+
+class FlavorSwapXmlTest(FlavorSwapJsonTest):
+ ctype = 'xml'
+
+
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
@@ -727,14 +1095,14 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
self._verify_response('security-groups-create-resp', subs, response)
def test_security_groups_list(self):
- """Get api sample of security groups get list request"""
+ # Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
subs = self._get_regexes()
return self._verify_response('security-groups-list-get-resp',
subs, response)
def test_security_groups_get(self):
- """Get api sample of security groups get request"""
+ # Get api sample of security groups get request.
security_group_id = '1'
response = self._do_get('os-security-groups/%s' % security_group_id)
subs = self._get_regexes()
@@ -742,7 +1110,7 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
subs, response)
def test_security_groups_list_server(self):
- """Get api sample of security groups for a specific server."""
+ # Get api sample of security groups for a specific server.
uuid = self._post_server()
response = self._do_get('servers/%s/os-security-groups' % uuid)
subs = self._get_regexes()
@@ -759,9 +1127,9 @@ class SchedulerHintsJsonTest(ApiSampleTestBase):
"Scheduler_hints")
def test_scheduler_hints_post(self):
- """Get api sample of scheduler hint post request"""
+ # Get api sample of scheduler hint post request.
hints = {'image_id': fake.get_valid_image_id(),
- 'image_near': str(uuid.uuid4())
+ 'image_near': str(uuid_lib.uuid4())
}
response = self._do_post('servers', 'scheduler-hints-post-req',
hints)
@@ -834,8 +1202,8 @@ class FloatingIpsJsonTest(ApiSampleTestBase):
def setUp(self):
super(FloatingIpsJsonTest, self).setUp()
- pool = FLAGS.default_floating_pool
- interface = FLAGS.public_interface
+ pool = CONF.default_floating_pool
+ interface = CONF.public_interface
self.ip_pool = [
{
@@ -896,7 +1264,7 @@ class FloatingIpsJsonTest(ApiSampleTestBase):
def test_floating_ips_create(self):
response = self._do_post('os-floating-ips',
'floating-ips-create-req',
- {"pool": FLAGS.default_floating_pool})
+ {"pool": CONF.default_floating_pool})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('floating-ips-create-resp',
@@ -922,12 +1290,90 @@ class FloatingIpsXmlTest(FloatingIpsJsonTest):
ctype = 'xml'
class FloatingIpsBulkJsonTest(ApiSampleTestBase):
    """API samples for the os-floating-ips-bulk admin extension."""

    extension_name = "nova.api.openstack.compute.contrib." \
        "floating_ips_bulk.Floating_ips_bulk"

    def setUp(self):
        # Seed the database with a small pool of floating IPs so the
        # list and list-by-host samples have deterministic content.
        super(FloatingIpsBulkJsonTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface,
                # Only this address is bound to a host, so it is the one
                # matched by test_floating_ips_bulk_list_by_host below.
                'host': "testHost"
            },
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

    def tearDown(self):
        # Remove the seeded addresses before the base class tears down.
        self.compute.db.floating_ip_bulk_destroy(
            context.get_admin_context(), self.ip_pool)
        super(FloatingIpsBulkJsonTest, self).tearDown()

    def test_floating_ips_bulk_list(self):
        # List every floating IP and verify the sample response.
        response = self._do_get('os-floating-ips-bulk')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('floating-ips-bulk-list-resp', subs,
                                     response)

    def test_floating_ips_bulk_list_by_host(self):
        # List only the floating IPs assigned to "testHost".
        response = self._do_get('os-floating-ips-bulk/testHost')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('floating-ips-bulk-list-by-host-resp',
                                     subs, response)

    def test_floating_ips_bulk_create(self):
        # Create a whole CIDR range of floating IPs in one request.
        response = self._do_post('os-floating-ips-bulk',
                                 'floating-ips-bulk-create-req',
                                 {"ip_range": "192.168.1.0/24",
                                  "pool": CONF.default_floating_pool,
                                  "interface": CONF.public_interface})
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('floating-ips-bulk-create-resp', subs,
                                     response)

    def test_floating_ips_bulk_delete(self):
        # Delete a CIDR range of floating IPs via PUT .../delete.
        response = self._do_put('os-floating-ips-bulk/delete',
                                'floating-ips-bulk-delete-req',
                                {"ip_range": "192.168.1.0/24"})
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('floating-ips-bulk-delete-resp', subs,
                                     response)
+
+
class FloatingIpsBulkXmlTest(FloatingIpsBulkJsonTest):
    # Re-run the bulk floating IP sample tests against the XML API.
    ctype = 'xml'
+
+
class KeyPairsSampleJsonTest(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = 'keypair-[0-9a-f-]+'
+ return subs
+
def test_keypairs_post(self, public_key=None):
- """Get api sample of key pairs post request"""
- key_name = 'keypair-' + str(uuid.uuid4())
+ """Get api sample of key pairs post request."""
+ key_name = 'keypair-' + str(uuid_lib.uuid4())
response = self._do_post('os-keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
subs = self._get_regexes()
@@ -940,8 +1386,8 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
return key_name
def test_keypairs_import_key_post(self):
- """Get api sample of key pairs post to import user's key"""
- key_name = 'keypair-' + str(uuid.uuid4())
+ # Get api sample of key pairs post to import user's key.
+ key_name = 'keypair-' + str(uuid_lib.uuid4())
subs = {
'keypair_name': key_name,
'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
@@ -958,7 +1404,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
self._verify_response('keypairs-import-post-resp', subs, response)
def test_keypairs_get(self):
- """Get api sample of key pairs get request"""
+ # Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = self._get_regexes()
@@ -1048,38 +1494,43 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
super(CloudPipeSampleJsonTest, self).setUp()
def get_user_data(self, project_id):
- """Stub method to generate user data for cloudpipe tests"""
+ """Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
- """Stub to get a valid network and its information"""
+ """Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(NetworkManager, "get_network", network_api_get)
+ self.stubs.Set(network_manager.NetworkManager, "get_network",
+ network_api_get)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
+ return subs
def test_cloud_pipe_create(self):
- """Get api samples of cloud pipe extension creation"""
- FLAGS.vpn_image_id = fake.get_valid_image_id()
- project = {'project_id': 'cloudpipe-' + str(uuid.uuid4())}
+ # Get api samples of cloud pipe extension creation.
+ self.flags(vpn_image_id=fake.get_valid_image_id())
+ project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
- subs['image_id'] = FLAGS.vpn_image_id
+ subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response)
return project
def test_cloud_pipe_list(self):
- """Get api samples of cloud pipe extension get request"""
+ # Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
- subs['image_id'] = FLAGS.vpn_image_id
+ subs['image_id'] = CONF.vpn_image_id
return self._verify_response('cloud-pipe-get-resp', subs, response)
@@ -1087,6 +1538,221 @@ class CloudPipeSampleXmlTest(CloudPipeSampleJsonTest):
ctype = "xml"
class CloudPipeUpdateJsonTest(ApiSampleTestBase):
    """API samples for the os-cloudpipe update (configure-project) action."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".cloudpipe_update.Cloudpipe_update")

    # NOTE: the original no-op setUp override (which only called super)
    # has been removed; the inherited setUp is used directly.

    def _get_flags(self):
        f = super(CloudPipeUpdateJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Cloudpipe_update also needs cloudpipe to be loaded
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
        return f

    def test_cloud_pipe_update(self):
        # PUT a new vpn address/port; the action returns 202 and no body,
        # so only the status code is checked.
        subs = {'vpn_ip': '192.168.1.1',
                'vpn_port': 2000}
        response = self._do_put('os-cloudpipe/configure-project',
                                'cloud-pipe-update-req',
                                subs)
        self.assertEqual(response.status, 202)
+
+
class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
    # Re-run the cloudpipe update sample tests against the XML API.
    ctype = "xml"
+
+
class AgentsJsonTest(ApiSampleTestBase):
    """API samples for the os-agents extension (guest agent builds)."""

    extension_name = "nova.api.openstack.compute.contrib.agents.Agents"

    def _get_flags(self):
        f = super(AgentsJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        return f

    def setUp(self):
        # Stub out every agent-build DB call so the samples are
        # deterministic and no real database rows are touched.
        super(AgentsJsonTest, self).setUp()

        fake_agents_list = [{'url': 'xxxxxxxxxxxx',
                             'hypervisor': 'hypervisor',
                             'architecture': 'x86',
                             'os': 'os',
                             'version': '8.0',
                             'md5hash': 'add6bb58e139be103324d04d82d8f545',
                             'id': '1'}]

        def fake_agent_build_create(context, values):
            # Always report id '1' for a newly created build.
            values['id'] = '1'
            agent_build_ref = models.AgentBuild()
            agent_build_ref.update(values)
            return agent_build_ref

        def fake_agent_build_get_all(context, hypervisor):
            # Filter the canned list by hypervisor when one is given.
            agent_build_all = []
            for agent in fake_agents_list:
                if hypervisor and hypervisor != agent['hypervisor']:
                    continue
                agent_build_ref = models.AgentBuild()
                agent_build_ref.update(agent)
                agent_build_all.append(agent_build_ref)
            return agent_build_all

        def fake_agent_build_update(context, agent_build_id, values):
            # Updates are accepted and discarded.
            pass

        def fake_agent_build_destroy(context, agent_update_id):
            # Deletes are accepted and discarded.
            pass

        self.stubs.Set(db, "agent_build_create",
                       fake_agent_build_create)
        self.stubs.Set(db, "agent_build_get_all",
                       fake_agent_build_get_all)
        self.stubs.Set(db, "agent_build_update",
                       fake_agent_build_update)
        self.stubs.Set(db, "agent_build_destroy",
                       fake_agent_build_destroy)

    def test_agent_create(self):
        # Creates a new agent build.
        project = {'url': 'xxxxxxxxxxxx',
                   'hypervisor': 'hypervisor',
                   'architecture': 'x86',
                   'os': 'os',
                   'version': '8.0',
                   'md5hash': 'add6bb58e139be103324d04d82d8f545'
                   }
        response = self._do_post('os-agents', 'agent-post-req',
                                 project)
        self.assertEqual(response.status, 200)
        # The stubbed create always assigns id 1 (see setUp).
        project['agent_id'] = 1
        self._verify_response('agent-post-resp', project, response)
        return project

    def test_agent_list(self):
        # Return a list of all agent builds.
        response = self._do_get('os-agents')
        self.assertEqual(response.status, 200)
        project = {'url': 'xxxxxxxxxxxx',
                   'hypervisor': 'hypervisor',
                   'architecture': 'x86',
                   'os': 'os',
                   'version': '8.0',
                   'md5hash': 'add6bb58e139be103324d04d82d8f545',
                   'agent_id': 1
                   }
        return self._verify_response('agents-get-resp', project, response)

    def test_agent_update(self):
        # Update an existing agent build.
        agent_id = 1
        subs = {'version': '7.0',
                'url': 'xxx://xxxx/xxx/xxx',
                'md5hash': 'add6bb58e139be103324d04d82d8f545'}
        response = self._do_put('os-agents/%s' % agent_id,
                                'agent-update-put-req', subs)
        self.assertEqual(response.status, 200)
        subs['agent_id'] = 1
        return self._verify_response('agent-update-put-resp', subs, response)

    def test_agent_delete(self):
        # Deletes an existing agent build.
        agent_id = 1
        response = self._do_delete('os-agents/%s' % agent_id)
        self.assertEqual(response.status, 200)
+
class AgentsXmlTest(AgentsJsonTest):
    # Re-run the agents sample tests against the XML API.
    ctype = "xml"
+
+
class FixedIpJsonTest(ApiSampleTestBase):
    """API samples for the os-fixed-ips admin extension."""

    extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"

    def _get_flags(self):
        f = super(FixedIpJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        return f

    def setUp(self):
        # Replace the fixed-ip DB calls with an in-memory table so the
        # samples are deterministic and no real database rows are touched.
        super(FixedIpJsonTest, self).setUp()

        fake_fixed_ips = [{'id': 1,
                           'address': '192.168.1.1',
                           'network_id': 1,
                           'virtual_interface_id': 1,
                           'instance_uuid': '1',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'host': None},
                          {'id': 2,
                           'address': '192.168.1.2',
                           'network_id': 1,
                           'virtual_interface_id': 2,
                           'instance_uuid': '2',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'host': None},
                          ]

        def fake_fixed_ip_get_by_address(context, address):
            # Linear scan of the fake table; raises on a miss.
            for fixed_ip in fake_fixed_ips:
                if fixed_ip['address'] == address:
                    return fixed_ip
            raise exception.FixedIpNotFoundForAddress(address=address)

        def fake_fixed_ip_get_by_address_detailed(context, address):
            # Return the (fixed_ip, network, host) triple the extension's
            # detailed "show" handler expects.
            network = {'id': 1,
                       'cidr': "192.168.1.0/24"}
            host = {'host': "host",
                    'hostname': 'openstack'}
            for fixed_ip in fake_fixed_ips:
                if fixed_ip['address'] == address:
                    return (fixed_ip, network, host)
            raise exception.FixedIpNotFoundForAddress(address=address)

        def fake_fixed_ip_update(context, address, values):
            # fake_fixed_ip_get_by_address raises FixedIpNotFoundForAddress
            # on an unknown address (it never returns None), so the
            # previous "if fixed_ip is None" branch was unreachable and
            # has been removed.
            fake_fixed_ip_get_by_address(context, address).update(values)

        self.stubs.Set(db, "fixed_ip_get_by_address",
                       fake_fixed_ip_get_by_address)
        self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
                       fake_fixed_ip_get_by_address_detailed)
        self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)

    def test_fixed_ip_reserve(self):
        # Reserve a Fixed IP.
        project = {'reserve': None}
        response = self._do_post('os-fixed-ips/192.168.1.1/action',
                                 'fixedip-post-req',
                                 project)
        self.assertEqual(response.status, 202)

    def test_get_fixed_ip(self):
        # Return data about the given fixed ip.
        response = self._do_get('os-fixed-ips/192.168.1.1')
        self.assertEqual(response.status, 200)
        project = {'cidr': '192.168.1.0/24',
                   'hostname': 'openstack',
                   'host': 'host',
                   'address': '192.168.1.1'}
        return self._verify_response('fixedips-get-resp', project, response)
+
+
class FixedIpXmlTest(FixedIpJsonTest):
    # Re-run the fixed IP sample tests against the XML API.
    ctype = "xml"
+
+
class AggregatesSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".aggregates.Aggregates"
@@ -1188,7 +1854,7 @@ class UsedLimitsSamplesJsonTest(ApiSampleTestBase):
"Used_limits")
def test_get_used_limits(self):
- """Get api sample to used limits"""
+ # Get api sample to used limits.
response = self._do_get('limits')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
@@ -1240,7 +1906,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
"Simple_tenant_usage")
def setUp(self):
- """setUp method for simple tenant usage"""
+ """setUp method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).setUp()
self._post_server()
timeutils.set_time_override(timeutils.utcnow() +
@@ -1251,12 +1917,12 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
}
def tearDown(self):
- """tearDown method for simple tenant usage"""
+ """tearDown method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_get_tenants_usage(self):
- """Get api sample to get all tenants usage request"""
+ # Get api sample to get all tenants usage request.
response = self._do_get('os-simple-tenant-usage?%s' % (
urllib.urlencode(self.query)))
self.assertEqual(response.status, 200)
@@ -1264,7 +1930,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
self._verify_response('simple-tenant-usage-get', subs, response)
def test_get_tenant_usage_details(self):
- """Get api sample to get specific tenant usage request"""
+ # Get api sample to get specific tenant usage request.
tenant_id = 'openstack'
response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
urllib.urlencode(self.query)))
@@ -1327,64 +1993,64 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.uuid = self._post_server()
def test_post_pause(self):
- """Get api samples to pause server request"""
+ # Get api samples to pause server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-pause', {})
self.assertEqual(response.status, 202)
def test_post_unpause(self):
- """Get api samples to unpause server request"""
+ # Get api samples to unpause server request.
self.test_post_pause()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unpause', {})
self.assertEqual(response.status, 202)
def test_post_suspend(self):
- """Get api samples to suspend server request"""
+ # Get api samples to suspend server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-suspend', {})
self.assertEqual(response.status, 202)
def test_post_resume(self):
- """Get api samples to server resume request"""
+ # Get api samples to server resume request.
self.test_post_suspend()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-resume', {})
self.assertEqual(response.status, 202)
def test_post_migrate(self):
- """Get api samples to migrate server request"""
+ # Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-migrate', {})
self.assertEqual(response.status, 202)
def test_post_reset_network(self):
- """Get api samples to reset server network request"""
+ # Get api samples to reset server network request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-network', {})
self.assertEqual(response.status, 202)
def test_post_inject_network_info(self):
- """Get api samples to inject network info request"""
+ # Get api samples to inject network info request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-inject-network-info', {})
self.assertEqual(response.status, 202)
def test_post_lock_server(self):
- """Get api samples to lock server request"""
+ # Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-lock-server', {})
self.assertEqual(response.status, 202)
def test_post_unlock_server(self):
- """Get api samples to unlock server request"""
+ # Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unlock-server', {})
self.assertEqual(response.status, 202)
def test_post_backup_server(self):
- """Get api samples to backup server request"""
+ # Get api samples to backup server request.
def image_details(self, context, **kwargs):
"""This stub is specifically used on the backup action."""
# NOTE(maurosr): I've added this simple stub cause backup action
@@ -1399,17 +2065,17 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_live_migrate_server(self):
- """Get api samples to server live migrate request"""
+ # Get api samples to server live migrate request.
def fake_live_migration_src_check(self, context, instance_ref):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_dest_check(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_common(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
self.stubs.Set(driver.Scheduler, '_live_migration_src_check',
fake_live_migration_src_check)
@@ -1427,8 +2093,8 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
hypervisor_type='bar',
hypervisor_version='1',
disabled=False)
- return [{'compute_node': [service]}]
- self.stubs.Set(db, "service_get_all_compute_by_host", fake_get_compute)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
@@ -1436,7 +2102,7 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_reset_state(self):
- """get api samples to server reset state request"""
+ # get api samples to server reset state request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-server-state', {})
self.assertEqual(response.status, 202)
@@ -1444,3 +2110,456 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
ctype = 'xml'
+
+
class ConsolesSampleJsonTests(ServersSampleBase):
    """API samples for the os-consoles extension (VNC console access)."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".consoles.Consoles")

    def test_get_vnc_console(self):
        # Boot a server, request its VNC console, and verify the response.
        server_uuid = self._post_server()
        resp = self._do_post('servers/%s/action' % server_uuid,
                             'get-vnc-console-post-req',
                             {'action': 'os-getVNCConsole'})
        self.assertEqual(resp.status, 200)
        template_subs = self._get_regexes()
        # The console URL is non-deterministic; match it with a regex.
        template_subs["url"] = (
            "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)")
        return self._verify_response('get-vnc-console-post-resp',
                                     template_subs, resp)
+
+
class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
    # Re-run the console sample tests against the XML API.
    ctype = 'xml'
+
+
class DeferredDeleteSampleJsonTests(ServersSampleBase):
    """API samples for the deferred-delete extension (restore/forceDelete)."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".deferred_delete.Deferred_delete")

    def setUp(self):
        super(DeferredDeleteSampleJsonTests, self).setUp()
        # A non-zero reclaim interval makes deletes deferred, so the
        # server can still be restored or force-deleted afterwards.
        self.flags(reclaim_instance_interval=1)

    def _soft_delete_server(self):
        # Boot a server, delete it, and hand back its uuid.
        server_id = self._post_server()
        self._do_delete('servers/%s' % server_id)
        return server_id

    def test_restore(self):
        # Restoring a deferred-deleted server returns 202 with no body.
        server_id = self._soft_delete_server()
        resp = self._do_post('servers/%s/action' % server_id,
                             'restore-post-req', {})
        self.assertEqual(resp.status, 202)
        self.assertEqual(resp.read(), '')

    def test_force_delete(self):
        # Force-deleting a deferred-deleted server returns 202, no body.
        server_id = self._soft_delete_server()
        resp = self._do_post('servers/%s/action' % server_id,
                             'force-delete-post-req', {})
        self.assertEqual(resp.status, 202)
        self.assertEqual(resp.read(), '')
+
+
class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
    # Re-run the deferred delete sample tests against the XML API.
    ctype = 'xml'
+
+
class QuotasSampleJsonTests(ApiSampleTestBase):
    """API samples for the os-quota-sets extension."""

    extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"

    def test_show_quotas(self):
        # Show the quota set of a tenant.
        resp = self._do_get('os-quota-sets/fake_tenant')
        self.assertEqual(resp.status, 200)
        return self._verify_response('quotas-show-get-resp', {}, resp)

    def test_show_quotas_defaults(self):
        # Show the default quota set.
        resp = self._do_get('os-quota-sets/fake_tenant/defaults')
        self.assertEqual(resp.status, 200)
        return self._verify_response('quotas-show-defaults-get-resp',
                                     {}, resp)

    def test_update_quotas(self):
        # Update a tenant's quota set.
        resp = self._do_put('os-quota-sets/fake_tenant',
                            'quotas-update-post-req',
                            {})
        self.assertEqual(resp.status, 200)
        return self._verify_response('quotas-update-post-resp', {}, resp)
+
+
class QuotasSampleXmlTests(QuotasSampleJsonTests):
    # Re-run the quota sample tests against the XML API.
    ctype = "xml"
+
+
class ExtendedStatusSampleJsonTests(ServersSampleBase):
    """API samples for the extended-status server attributes extension."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_status.Extended_status")

    def test_show(self):
        # List servers (brief form) after booting one.
        server_id = self._post_server()
        resp = self._do_get('servers')
        self.assertEqual(resp.status, 200)
        template_subs = self._get_regexes()
        template_subs['id'] = server_id
        return self._verify_response('servers-list-resp', template_subs,
                                     resp)

    def test_detail(self):
        # List servers in detail; hostid varies so match it by regex.
        server_id = self._post_server()
        resp = self._do_get('servers/detail')
        self.assertEqual(resp.status, 200)
        template_subs = self._get_regexes()
        template_subs['id'] = server_id
        template_subs['hostid'] = '[a-f0-9]+'
        return self._verify_response('servers-detail-resp', template_subs,
                                     resp)
+
+
class ExtendedStatusSampleXmlTests(ExtendedStatusSampleJsonTests):
    # Re-run the extended status sample tests against the XML API.
    ctype = 'xml'
+
+
class FlavorManageSampleJsonTests(ApiSampleTestBase):
    """API samples for the flavormanage extension (create/delete flavors)."""

    extension_name = ("nova.api.openstack.compute.contrib.flavormanage."
                      "Flavormanage")

    def _create_flavor(self):
        """POST a new flavor and verify the create response sample."""
        flavor_subs = {
            'flavor_id': 10,
            'flavor_name': "test_flavor"
        }
        resp = self._do_post("flavors",
                             "flavor-create-post-req",
                             flavor_subs)
        self.assertEqual(resp.status, 200)
        flavor_subs.update(self._get_regexes())
        return self._verify_response("flavor-create-post-resp", flavor_subs,
                                     resp)

    def test_create_flavor(self):
        # Get api sample to create a flavor.
        self._create_flavor()

    def test_delete_flavor(self):
        # Get api sample to delete a flavor; expect 202 with an empty body.
        self._create_flavor()
        resp = self._do_delete("flavors/10")
        self.assertEqual(resp.status, 202)
        self.assertEqual(resp.read(), '')
+
+
class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
    # Re-run the flavor manage sample tests against the XML API.
    ctype = "xml"
+
+
class ServerPasswordSampleJsonTests(ServersSampleBase):
    """API samples for the os-server-password extension."""

    extension_name = ("nova.api.openstack.compute.contrib.server_password."
                      "Server_password")

    def test_get_password(self):

        # Mock password since there is no api to set it
        def fake_ext_password(*args, **kwargs):
            return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
                    "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
                    "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
                    "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
                    "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
                    "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
                    "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
        self.stubs.Set(password, "extract_password", fake_ext_password)
        uuid = self._post_server()
        response = self._do_get('servers/%s/os-server-password' % uuid)
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        # '+' is a regex metacharacter; escape it so template matching
        # treats the canned password literally.
        subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
        return self._verify_response('get-password-resp', subs, response)

    def test_reset_password(self):
        # Clearing the stored password returns 204 No Content.
        uuid = self._post_server()
        response = self._do_delete('servers/%s/os-server-password' % uuid)
        self.assertEqual(response.status, 204)
+
+
class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
    # Re-run the server password sample tests against the XML API.
    ctype = "xml"
+
+
class DiskConfigJsonTest(ServersSampleBase):
    """API samples for the OS-DCF (disk_config) extension."""

    extension_name = ("nova.api.openstack.compute.contrib.disk_config."
                      "Disk_config")

    def test_list_servers_detail(self):
        uuid = self._post_server()
        response = self._do_get('servers/detail')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        # hostid is non-deterministic; match it with a regex.
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        return self._verify_response('list-servers-detail-get',
                                     subs, response)

    def test_get_server(self):
        uuid = self._post_server()
        response = self._do_get('servers/%s' % uuid)
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        return self._verify_response('server-get-resp', subs, response)

    def test_update_server(self):
        uuid = self._post_server()
        response = self._do_put('servers/%s' % uuid,
                                'server-update-put-req', {})
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        return self._verify_response('server-update-put-resp',
                                     subs, response)

    def test_resize_server(self):
        # Allow resize on a single-host test deployment.
        self.flags(allow_resize_to_same_host=True)
        uuid = self._post_server()
        response = self._do_post('servers/%s/action' % uuid,
                                 'server-resize-post-req', {})
        self.assertEqual(response.status, 202)
        # NOTE(tmello): Resize does not return response body
        # Bug #1085213.
        self.assertEqual(response.read(), "")

    def test_rebuild_server(self):
        uuid = self._post_server()
        subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
        }
        response = self._do_post('servers/%s/action' % uuid,
                                 'server-action-rebuild-req', subs)
        self.assertEqual(response.status, 202)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        return self._verify_response('server-action-rebuild-resp',
                                     subs, response)

    def test_get_image(self):
        image_id = fake.get_valid_image_id()
        response = self._do_get('images/%s' % image_id)
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        subs['image_id'] = image_id
        return self._verify_response('image-get-resp', subs, response)

    def test_list_images(self):
        response = self._do_get('images/detail')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('image-list-resp', subs, response)
+
+
class DiskConfigXmlTest(DiskConfigJsonTest):
    # Re-run the disk config sample tests against the XML API.
    ctype = 'xml'
+
+
class OsNetworksJsonTests(ApiSampleTestBase):
    """API samples for the os-tenant-networks extension."""

    extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
                      ".Os_tenant_networks")

    def setUp(self):
        super(OsNetworksJsonTests, self).setUp()
        # NOTE(review): this override is not undone in a tearDown here;
        # presumably the base test class restores config -- confirm.
        CONF.set_override("enable_network_quota", True)

        def fake(*args, **kwargs):
            # No-op stand-in for quota reserve/commit/rollback.
            pass

        self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
        self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
        self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
        self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
        self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
        self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)

    def test_list_networks(self):
        # List this tenant's networks.
        response = self._do_get('os-tenant-networks')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('networks-list-res', subs, response)

    def test_create_network(self):
        # Create a tenant network.
        response = self._do_post('os-tenant-networks', "networks-post-req", {})
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        self._verify_response('networks-post-res', subs, response)

    def test_delete_network(self):
        # Create a network, then delete it by the id from the response.
        response = self._do_post('os-tenant-networks', "networks-post-req", {})
        net = json.loads(response.read())
        response = self._do_delete('os-tenant-networks/%s' %
                                   net["network"]["id"])
        self.assertEqual(response.status, 202)
+
+
class NetworksAssociateJsonTests(ApiSampleTestBase):
    """API samples for the os-networks associate/disassociate actions."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".networks_associate.Networks_associate")

    # Marker distinguishing "argument not passed" from an explicit None.
    _sentinel = object()

    def _get_flags(self):
        f = super(NetworksAssociateJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires the Os_networks extension to be
        # loaded as well.
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.os_networks.Os_networks')
        return f

    def setUp(self):
        super(NetworksAssociateJsonTests, self).setUp()

        # Pretend every (dis)association succeeds. The sentinel defaults
        # are bound via the class name because the class object is fully
        # defined by the time setUp runs.
        def fake_associate(self, context, network_id,
                           host=NetworksAssociateJsonTests._sentinel,
                           project=NetworksAssociateJsonTests._sentinel):
            return True

        self.stubs.Set(network_api.API, "associate", fake_associate)

    def test_disassociate(self):
        # Clear both host and project association of a network.
        response = self._do_post('os-networks/1/action',
                                 'network-disassociate-req',
                                 {})
        self.assertEqual(response.status, 202)

    def test_disassociate_host(self):
        # Clear only the host association of a network.
        response = self._do_post('os-networks/1/action',
                                 'network-disassociate-host-req',
                                 {})
        self.assertEqual(response.status, 202)

    def test_disassociate_project(self):
        # Clear only the project association of a network.
        response = self._do_post('os-networks/1/action',
                                 'network-disassociate-project-req',
                                 {})
        self.assertEqual(response.status, 202)

    def test_associate_host(self):
        # Associate a network with a specific host.
        response = self._do_post('os-networks/1/action',
                                 'network-associate-host-req',
                                 {"host": "testHost"})
        self.assertEqual(response.status, 202)
+
+
class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
    # Re-run the network associate sample tests against the XML API.
    ctype = 'xml'
+
+
class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
    """API samples for the OS-FLV-DISABLED flavor attribute extension."""

    extension_name = ("nova.api.openstack.compute.contrib.flavor_disabled."
                      "Flavor_disabled")

    def test_show_flavor(self):
        # Get api sample to show flavor_disabled attr. of a flavor.
        flavor_id = 1
        resp = self._do_get('flavors/%s' % flavor_id)
        self.assertEqual(resp.status, 200)
        template_subs = self._get_regexes()
        template_subs['flavor_id'] = flavor_id
        return self._verify_response('flavor-show-get-resp', template_subs,
                                     resp)

    def test_detail_flavor(self):
        # Get api sample to show details of a flavor.
        resp = self._do_get('flavors/detail')
        self.assertEqual(resp.status, 200)
        template_subs = self._get_regexes()
        return self._verify_response('flavor-detail-get-resp', template_subs,
                                     resp)
+
+
class FlavorDisabledSampleXmlTests(FlavorDisabledSampleJsonTests):
    # Re-run the flavor disabled sample tests against the XML API.
    ctype = "xml"
+
+
class QuotaClassesSampleJsonTests(ApiSampleTestBase):
    """API samples for the os-quota-class-sets extension."""

    extension_name = ("nova.api.openstack.compute.contrib.quota_classes."
                      "Quota_classes")
    set_id = 'test_class'

    def test_show_quota_classes(self):
        # Show the quota overrides stored for one quota class.
        resp = self._do_get('os-quota-class-sets/%s' % self.set_id)
        self.assertEqual(resp.status, 200)
        return self._verify_response('quota-classes-show-get-resp',
                                     {'set_id': self.set_id}, resp)

    def test_update_quota_classes(self):
        # Update the quota overrides of a quota class.
        resp = self._do_put('os-quota-class-sets/%s' % self.set_id,
                            'quota-classes-update-post-req',
                            {})
        self.assertEqual(resp.status, 200)
        return self._verify_response('quota-classes-update-post-resp',
                                     {}, resp)
+
+
class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
    # Re-run the quota classes sample tests against the XML API.
    ctype = "xml"
+
+
class CellsSampleJsonTest(ApiSampleTestBase):
    """API samples for the os-cells extension."""

    extension_name = "nova.api.openstack.compute.contrib.cells.Cells"

    def setUp(self):
        # db_check_interval < 0 makes cells manager always hit the DB;
        # the flags must be set before the base setUp starts services.
        self.flags(enable=True, db_check_interval=-1, group='cells')
        super(CellsSampleJsonTest, self).setUp()
        self._stub_cells()

    def _stub_cells(self, num_cells=5):
        # Replace the cell DB calls with an in-memory list of num_cells
        # fake cells named cell1..cellN.
        self.cells = []
        self.cells_next_id = 1

        def _fake_cell_get_all(context):
            return self.cells

        def _fake_cell_get(context, cell_name):
            # Linear scan by name; raises on a miss.
            for cell in self.cells:
                if cell['name'] == cell_name:
                    return cell
            raise exception.CellNotFound(cell_name=cell_name)

        for x in xrange(num_cells):
            cell = models.Cell()
            our_id = self.cells_next_id
            self.cells_next_id += 1
            cell.update({'id': our_id,
                         'name': 'cell%s' % our_id,
                         'username': 'username%s' % our_id,
                         # Alternate parent/child cells by id parity.
                         'is_parent': our_id % 2 == 0})
            self.cells.append(cell)

        self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
        self.stubs.Set(db, 'cell_get', _fake_cell_get)

    def test_cells_empty_list(self):
        # Re-stub with zero cells for the empty-list sample.
        self._stub_cells(num_cells=0)
        response = self._do_get('os-cells')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('cells-list-empty-resp', subs, response)

    def test_cells_list(self):
        response = self._do_get('os-cells')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('cells-list-resp', subs, response)

    def test_cells_get(self):
        response = self._do_get('os-cells/cell3')
        self.assertEqual(response.status, 200)
        subs = self._get_regexes()
        return self._verify_response('cells-get-resp', subs, response)
+
+
class CellsSampleXmlTest(CellsSampleJsonTest):
    # Re-run the cells sample tests against the XML API.
    ctype = 'xml'
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index 056ff32b7..ca5ff8374 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -15,26 +15,26 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.openstack.common import cfg
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
def _get_flags(self):
f = super(ExtensionsTest, self)._get_flags()
- f['osapi_compute_extension'] = FLAGS.osapi_compute_extension[:]
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
return f
def test_get_foxnsocks(self):
- """Simple check that fox-n-socks works."""
+ # Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.read()
LOG.debug("foxnsocks: %s" % foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index cecfef31a..80b40e616 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__)
class LoginTest(integrated_helpers._IntegratedTestBase):
def test_login(self):
- """Simple check - we list flavors - so we know we're logged in."""
+ # Simple check - we list flavors - so we know we're logged in.
flavors = self.api.get_flavors()
for flavor in flavors:
LOG.debug(_("flavor: %s") % flavor)
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index fbab7eb49..5a82e0033 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -21,12 +21,10 @@ import signal
import time
import traceback
-from nova import flags
from nova.openstack.common.log import logging
from nova import service
from nova.tests.integrated import integrated_helpers
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@@ -73,18 +71,24 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
self.pid = pid
- # Wait for up to a second for workers to get started
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- if len(workers) == self.workers:
- break
-
- time.sleep(.1)
+ # Wait at most 10 seconds to spawn workers
+ cond = lambda: self.workers == len(self._get_workers())
+ timeout = 10
+ self._wait(cond, timeout)
+ workers = self._get_workers()
self.assertEqual(len(workers), self.workers)
return workers
+ def _wait(self, cond, timeout):
+ start = time.time()
+ while True:
+ if cond():
+ break
+ if time.time() - start > timeout:
+ break
+ time.sleep(.1)
+
def tearDown(self):
if self.pid:
# Make sure all processes are stopped
@@ -116,18 +120,14 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
LOG.info('pid of first child is %s' % start_workers[0])
os.kill(start_workers[0], signal.SIGTERM)
- # loop and check if new worker is spawned (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- end_workers = self._get_workers()
- LOG.info('workers: %r' % end_workers)
-
- if start_workers != end_workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to respawn a worker
+ cond = lambda: start_workers != self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
# Make sure worker pids don't match
+ end_workers = self._get_workers()
+ LOG.info('workers: %r' % end_workers)
self.assertNotEqual(start_workers, end_workers)
# check if api service still works
@@ -143,17 +143,13 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
os.kill(self.pid, sig)
- # loop and check if all processes are killed (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- LOG.info('workers: %r' % workers)
-
- if not workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to kill all workers
+ cond = lambda: not self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
+ workers = self._get_workers()
+ LOG.info('workers: %r' % workers)
self.assertFalse(workers, 'No OS processes left.')
def test_terminate_sigkill(self):
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 0c9024a8a..0756775dd 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -16,7 +16,6 @@
# under the License.
import time
-import unittest
from nova.openstack.common.log import logging
from nova.tests import fake_network
@@ -29,6 +28,10 @@ LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
+ def setUp(self):
+ super(ServersTest, self).setUp()
+ self.conductor = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
@@ -45,13 +48,13 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
- """Simple check that listing servers works."""
+ # Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
- """Create a server which will enter error state."""
+ # Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
@@ -72,7 +75,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
- """Creates and deletes a server."""
+ # Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
@@ -137,13 +140,13 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_deferred_delete(self):
- """Creates, deletes and waits for server to be reclaimed."""
+ # Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(
- periodic_interval=0.3, periodic_fuzzy_delay=0)
+ self._restart_compute_service(periodic_interval_max=0.3,
+ periodic_fuzzy_delay=0)
# Create server
server = self._build_minimal_create_server_request()
@@ -180,7 +183,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
- """Creates, deletes and restores a server."""
+ # Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -213,7 +216,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
- """Creates, deletes and force deletes a server."""
+ # Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -270,7 +273,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
- """Creates a server with metadata."""
+ # Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
@@ -312,7 +315,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
- """Rebuild a server with metadata."""
+ # Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
@@ -379,7 +382,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_rename_server(self):
- """Test building and renaming a server."""
+ # Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
@@ -400,7 +403,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(server_id)
def test_create_multiple_servers(self):
- """Creates multiple servers and checks for reservation_id"""
+ # Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
@@ -434,7 +437,3 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index b6bf197d7..1c1dd1b06 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_servers(self):
- """/servers should have v1.1 namespace (has changed in 1.1)."""
+ # /servers should have v1.1 namespace (has changed in 1.1).
headers = {}
headers['Accept'] = 'application/xml'
diff --git a/nova/tests/matchers.py b/nova/tests/matchers.py
new file mode 100644
index 000000000..be65da823
--- /dev/null
+++ b/nova/tests/matchers.py
@@ -0,0 +1,454 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Matcher classes to be used inside of the testtools assertThat framework."""
+
+import pprint
+
+from lxml import etree
+
+
+class DictKeysMismatch(object):
+ def __init__(self, d1only, d2only):
+ self.d1only = d1only
+ self.d2only = d2only
+
+ def describe(self):
+ return ('Keys in d1 and not d2: %(d1only)s.'
+ ' Keys in d2 and not d1: %(d2only)s' % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictMismatch(object):
+ def __init__(self, key, d1_value, d2_value):
+ self.key = key
+ self.d1_value = d1_value
+ self.d2_value = d2_value
+
+ def describe(self):
+ return ("Dictionaries do not match at %(key)s."
+ " d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictMatches(object):
+
+ def __init__(self, d1, approx_equal=False, tolerance=0.001):
+ self.d1 = d1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictMatches(%s)' % (pprint.pformat(self.d1))
+
+ # Useful assertions
+ def match(self, d2):
+ """Assert two dicts are equivalent.
+
+ This is a 'deep' match in the sense that it handles nested
+ dictionaries appropriately.
+
+ NOTE:
+
+        If you don't care about (or don't know) a given value, you can specify
+ the string DONTCARE as the value. This will cause that dict-item
+ to be skipped.
+
+ """
+
+ d1keys = set(self.d1.keys())
+ d2keys = set(d2.keys())
+ if d1keys != d2keys:
+ d1only = d1keys - d2keys
+ d2only = d2keys - d1keys
+ return DictKeysMismatch(d1only, d2only)
+
+ for key in d1keys:
+ d1value = self.d1[key]
+ d2value = d2[key]
+ try:
+ error = abs(float(d1value) - float(d2value))
+ within_tolerance = error <= self.tolerance
+ except (ValueError, TypeError):
+ # If both values aren't convertible to float, just ignore
+ # ValueError if arg is a str, TypeError if it's something else
+ # (like None)
+ within_tolerance = False
+
+ if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
+ matcher = DictMatches(d1value)
+ did_match = matcher.match(d2value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (d1value, d2value):
+ continue
+ elif self.approx_equal and within_tolerance:
+ continue
+ elif d1value != d2value:
+ return DictMismatch(key, d1value, d2value)
+
+
+class ListLengthMismatch(object):
+ def __init__(self, len1, len2):
+ self.len1 = len1
+ self.len2 = len2
+
+ def describe(self):
+ return ('Length mismatch: len(L1)=%(len1)d != '
+ 'len(L2)=%(len2)d' % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictListMatches(object):
+
+ def __init__(self, l1, approx_equal=False, tolerance=0.001):
+ self.l1 = l1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
+
+ # Useful assertions
+ def match(self, l2):
+        """Assert two lists of dicts are equivalent."""
+
+ l1count = len(self.l1)
+ l2count = len(l2)
+ if l1count != l2count:
+ return ListLengthMismatch(l1count, l2count)
+
+ for d1, d2 in zip(self.l1, l2):
+ matcher = DictMatches(d2,
+ approx_equal=self.approx_equal,
+ tolerance=self.tolerance)
+ did_match = matcher.match(d1)
+ if did_match:
+ return did_match
+
+
+class SubDictMismatch(object):
+ def __init__(self,
+ key=None,
+ sub_value=None,
+ super_value=None,
+ keys=False):
+ self.key = key
+ self.sub_value = sub_value
+ self.super_value = super_value
+ self.keys = keys
+
+ def describe(self):
+ if self.keys:
+ return "Keys between dictionaries did not match"
+ else:
+ return("Dictionaries do not match at %s. d1: %s d2: %s"
+ % (self.key,
+ self.super_value,
+ self.sub_value))
+
+ def get_details(self):
+ return {}
+
+
+class IsSubDictOf(object):
+
+ def __init__(self, super_dict):
+ self.super_dict = super_dict
+
+ def __str__(self):
+ return 'IsSubDictOf(%s)' % (self.super_dict)
+
+ def match(self, sub_dict):
+        """Assert a sub_dict is a subset of super_dict."""
+ if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
+ return SubDictMismatch(keys=True)
+ for k, sub_value in sub_dict.items():
+ super_value = self.super_dict[k]
+ if isinstance(sub_value, dict):
+ matcher = IsSubDictOf(super_value)
+ did_match = matcher.match(sub_value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (sub_value, super_value):
+ continue
+ else:
+ if sub_value != super_value:
+ return SubDictMismatch(k, sub_value, super_value)
+
+
+class FunctionCallMatcher(object):
+
+ def __init__(self, expected_func_calls):
+ self.expected_func_calls = expected_func_calls
+ self.actual_func_calls = []
+
+ def call(self, *args, **kwargs):
+ func_call = {'args': args, 'kwargs': kwargs}
+ self.actual_func_calls.append(func_call)
+
+ def match(self):
+ dict_list_matcher = DictListMatches(self.expected_func_calls)
+ return dict_list_matcher.match(self.actual_func_calls)
+
+
+class XMLMismatch(object):
+ """Superclass for XML mismatch."""
+
+ def __init__(self, state):
+ self.path = str(state)
+ self.expected = state.expected
+ self.actual = state.actual
+
+ def describe(self):
+ return "%(path)s: XML does not match" % self.__dict__
+
+ def get_details(self):
+ return {
+ 'expected': self.expected,
+ 'actual': self.actual,
+ }
+
+
+class XMLTagMismatch(XMLMismatch):
+ """XML tags don't match."""
+
+ def __init__(self, state, idx, expected_tag, actual_tag):
+ super(XMLTagMismatch, self).__init__(state)
+ self.idx = idx
+ self.expected_tag = expected_tag
+ self.actual_tag = actual_tag
+
+ def describe(self):
+ return ("%(path)s: XML tag mismatch at index %(idx)d: "
+ "expected tag <%(expected_tag)s>; "
+ "actual tag <%(actual_tag)s>" % self.__dict__)
+
+
+class XMLAttrKeysMismatch(XMLMismatch):
+ """XML attribute keys don't match."""
+
+ def __init__(self, state, expected_only, actual_only):
+ super(XMLAttrKeysMismatch, self).__init__(state)
+ self.expected_only = ', '.join(sorted(expected_only))
+ self.actual_only = ', '.join(sorted(actual_only))
+
+ def describe(self):
+ return ("%(path)s: XML attributes mismatch: "
+ "keys only in expected: %(expected_only)s; "
+ "keys only in actual: %(actual_only)s" % self.__dict__)
+
+
+class XMLAttrValueMismatch(XMLMismatch):
+ """XML attribute values don't match."""
+
+ def __init__(self, state, key, expected_value, actual_value):
+ super(XMLAttrValueMismatch, self).__init__(state)
+ self.key = key
+ self.expected_value = expected_value
+ self.actual_value = actual_value
+
+ def describe(self):
+ return ("%(path)s: XML attribute value mismatch: "
+ "expected value of attribute %(key)s: %(expected_value)r; "
+ "actual value: %(actual_value)r" % self.__dict__)
+
+
+class XMLTextValueMismatch(XMLMismatch):
+ """XML text values don't match."""
+
+ def __init__(self, state, expected_text, actual_text):
+ super(XMLTextValueMismatch, self).__init__(state)
+ self.expected_text = expected_text
+ self.actual_text = actual_text
+
+ def describe(self):
+ return ("%(path)s: XML text value mismatch: "
+ "expected text value: %(expected_text)r; "
+ "actual value: %(actual_text)r" % self.__dict__)
+
+
+class XMLUnexpectedChild(XMLMismatch):
+ """Unexpected child present in XML."""
+
+ def __init__(self, state, tag, idx):
+ super(XMLUnexpectedChild, self).__init__(state)
+ self.tag = tag
+ self.idx = idx
+
+ def describe(self):
+ return ("%(path)s: XML unexpected child element <%(tag)s> "
+ "present at index %(idx)d" % self.__dict__)
+
+
+class XMLExpectedChild(XMLMismatch):
+ """Expected child not present in XML."""
+
+ def __init__(self, state, tag, idx):
+ super(XMLExpectedChild, self).__init__(state)
+ self.tag = tag
+ self.idx = idx
+
+ def describe(self):
+ return ("%(path)s: XML expected child element <%(tag)s> "
+ "not present at index %(idx)d" % self.__dict__)
+
+
+class XMLMatchState(object):
+ """
+ Maintain some state for matching.
+
+ Tracks the XML node path and saves the expected and actual full
+ XML text, for use by the XMLMismatch subclasses.
+ """
+
+ def __init__(self, expected, actual):
+ self.path = []
+ self.expected = expected
+ self.actual = actual
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self.path.pop()
+ return False
+
+ def __str__(self):
+ return '/' + '/'.join(self.path)
+
+ def node(self, tag, idx):
+ """
+ Adds tag and index to the path; they will be popped off when
+ the corresponding 'with' statement exits.
+
+ :param tag: The element tag
+ :param idx: If not None, the integer index of the element
+ within its parent. Not included in the path
+ element if None.
+ """
+
+ if idx is not None:
+ self.path.append("%s[%d]" % (tag, idx))
+ else:
+ self.path.append(tag)
+ return self
+
+
+class XMLMatches(object):
+ """Compare XML strings. More complete than string comparison."""
+
+ def __init__(self, expected):
+ self.expected_xml = expected
+ self.expected = etree.fromstring(expected)
+
+ def __str__(self):
+ return 'XMLMatches(%r)' % self.expected_xml
+
+ def match(self, actual_xml):
+ actual = etree.fromstring(actual_xml)
+
+ state = XMLMatchState(self.expected_xml, actual_xml)
+ result = self._compare_node(self.expected, actual, state, None)
+
+ if result is False:
+ return XMLMismatch(state)
+ elif result is not True:
+ return result
+
+ def _compare_node(self, expected, actual, state, idx):
+ """Recursively compares nodes within the XML tree."""
+
+ # Start by comparing the tags
+ if expected.tag != actual.tag:
+ return XMLTagMismatch(state, idx, expected.tag, actual.tag)
+
+ with state.node(expected.tag, idx):
+ # Compare the attribute keys
+ expected_attrs = set(expected.attrib.keys())
+ actual_attrs = set(actual.attrib.keys())
+ if expected_attrs != actual_attrs:
+ expected_only = expected_attrs - actual_attrs
+ actual_only = actual_attrs - expected_attrs
+ return XMLAttrKeysMismatch(state, expected_only, actual_only)
+
+ # Compare the attribute values
+ for key in expected_attrs:
+ expected_value = expected.attrib[key]
+ actual_value = actual.attrib[key]
+
+ if 'DONTCARE' in (expected_value, actual_value):
+ continue
+ elif expected_value != actual_value:
+ return XMLAttrValueMismatch(state, key, expected_value,
+ actual_value)
+
+ # Compare the contents of the node
+ if len(expected) == 0 and len(actual) == 0:
+ # No children, compare text values
+ if ('DONTCARE' not in (expected.text, actual.text) and
+ expected.text != actual.text):
+ return XMLTextValueMismatch(state, expected.text,
+ actual.text)
+ else:
+ expected_idx = 0
+ actual_idx = 0
+ while (expected_idx < len(expected) and
+ actual_idx < len(actual)):
+ # Ignore comments and processing instructions
+ # TODO(Vek): may interpret PIs in the future, to
+ # allow for, say, arbitrary ordering of some
+ # elements
+ if (expected[expected_idx].tag in
+ (etree.Comment, etree.ProcessingInstruction)):
+ expected_idx += 1
+ continue
+
+ # Compare the nodes
+ result = self._compare_node(expected[expected_idx],
+ actual[actual_idx], state,
+ actual_idx)
+ if result is not True:
+ return result
+
+ # Step on to comparing the next nodes...
+ expected_idx += 1
+ actual_idx += 1
+
+ # Make sure we consumed all nodes in actual
+ if actual_idx < len(actual):
+ return XMLUnexpectedChild(state, actual[actual_idx].tag,
+ actual_idx)
+
+ # Make sure we consumed all nodes in expected
+ if expected_idx < len(expected):
+ for node in expected[expected_idx:]:
+ if (node.tag in
+ (etree.Comment, etree.ProcessingInstruction)):
+ continue
+
+ return XMLExpectedChild(state, node.tag, actual_idx)
+
+ # The nodes match
+ return True
diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/monkey_patch_example/__init__.py
index 25cf9ccfe..779dc72f3 100644
--- a/nova/tests/monkey_patch_example/__init__.py
+++ b/nova/tests/monkey_patch_example/__init__.py
@@ -21,7 +21,7 @@ CALLED_FUNCTION = []
def example_decorator(name, function):
- """ decorator for notify which is used from utils.monkey_patch()
+ """decorator for notify which is used from utils.monkey_patch()
:param name: name of the function
:param function: - object of the function
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index bfc9dc878..959c5a472 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -15,10 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for network API"""
+"""Tests for network API."""
+import itertools
import random
+import mox
+
from nova import context
from nova import exception
from nova import network
@@ -37,8 +40,27 @@ class ApiTestCase(test.TestCase):
self.context = context.RequestContext('fake-user',
'fake-project')
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'instance_uuid',
+ 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ instance = dict(id='id', uuid='uuid', project_id='project_id',
+ host='host', instance_type={'rxtx_factor': 0})
+ self.network_api.allocate_for_instance(
+ 'context', instance, 'vpn', 'requested_networks', macs=macs)
+
def _do_test_associate_floating_ip(self, orig_instance_uuid):
- """Test post-association logic"""
+ """Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
@@ -117,7 +139,6 @@ class ApiTestCase(test.TestCase):
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
- expected['host'] = 'fake_compute_dest'
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
@@ -125,6 +146,7 @@ class ApiTestCase(test.TestCase):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
+ expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
@@ -139,6 +161,7 @@ class ApiTestCase(test.TestCase):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
+ expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
@@ -151,7 +174,7 @@ class ApiTestCase(test.TestCase):
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
- raise exception.FixedIpNotFoundForInstance
+ raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
@@ -197,3 +220,15 @@ class ApiTestCase(test.TestCase):
instance = {'uuid': FAKE_UUID}
result = self.network_api._is_multi_host(self.context, instance)
self.assertEqual(is_multi_host, result)
+
+ def test_get_backdoor_port(self):
+ backdoor_port = 59697
+
+ def fake_get_backdoor_port(ctxt, host):
+ return backdoor_port
+
+ self.stubs.Set(self.network_api.network_rpcapi, 'get_backdoor_port',
+ fake_get_backdoor_port)
+
+ port = self.network_api.get_backdoor_port(self.context, 'fake_host')
+ self.assertEqual(port, backdoor_port)
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index f69023ef8..c0770902d 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -21,20 +21,15 @@ import mox
from nova import context
from nova import db
-from nova import flags
+from nova.network import driver
from nova.network import linux_net
from nova.openstack.common import fileutils
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova import utils
-
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
-
HOST = "testhost"
instances = {'00000000-0000-0000-0000-0000000000000000':
@@ -216,8 +211,7 @@ class LinuxNetworkTestCase(test.TestCase):
def setUp(self):
super(LinuxNetworkTestCase, self).setUp()
- network_driver = FLAGS.network_driver
- self.driver = importutils.import_module(network_driver)
+ self.driver = driver.load_network_driver()
self.driver.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=True)
@@ -308,8 +302,26 @@ class LinuxNetworkTestCase(test.TestCase):
self.assertEquals(actual_hosts, expected)
+ def test_get_dns_hosts_for_nw00(self):
+ expected = (
+ "192.168.0.100\tfake_instance00.novalocal\n"
+ "192.168.1.101\tfake_instance01.novalocal\n"
+ "192.168.0.102\tfake_instance00.novalocal"
+ )
+ actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])
+ self.assertEquals(actual_hosts, expected)
+
+ def test_get_dns_hosts_for_nw01(self):
+ expected = (
+ "192.168.1.100\tfake_instance00.novalocal\n"
+ "192.168.0.101\tfake_instance01.novalocal\n"
+ "192.168.1.102\tfake_instance01.novalocal"
+ )
+ actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])
+ self.assertEquals(actual_hosts, expected)
+
def test_get_dhcp_opts_for_nw00(self):
- expected_opts = 'NW-0,3\nNW-3,3\nNW-4,3'
+ expected_opts = 'NW-3,3\nNW-4,3'
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
self.assertEquals(actual_opts, expected_opts)
@@ -335,6 +347,12 @@ class LinuxNetworkTestCase(test.TestCase):
actual = self.driver._host_dhcp(data)
self.assertEquals(actual, expected)
+ def test_host_dns_without_default_gateway_network(self):
+ expected = "192.168.0.100\tfake_instance00.novalocal"
+ data = get_associated(self.context, 0)[0]
+ actual = self.driver._host_dns(data)
+ self.assertEquals(actual, expected)
+
def test_linux_bridge_driver_plug(self):
"""Makes sure plug doesn't drop FORWARD by default.
@@ -408,6 +426,101 @@ class LinuxNetworkTestCase(test.TestCase):
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
+ def test_isolated_host(self):
+ self.flags(fake_network=False,
+ share_dhcp_address=True)
+ # NOTE(vish): use a fresh copy of the manager for each test
+ self.stubs.Set(linux_net, 'iptables_manager',
+ linux_net.IptablesManager())
+ self.stubs.Set(linux_net, 'binary_name', 'test')
+ executes = []
+ inputs = []
+
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ process_input = kwargs.get('process_input')
+ if process_input:
+ inputs.append(process_input)
+ return "", ""
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+
+ @classmethod
+ def fake_ensure(_self, bridge, interface, network, gateway):
+ return bridge
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'ensure_bridge', fake_ensure)
+
+ iface = 'eth0'
+ dhcp = '192.168.1.1'
+ network = {'dhcp_server': dhcp,
+ 'bridge': 'br100',
+ 'bridge_interface': iface}
+ driver.plug(network, 'fakemac')
+ expected = [
+ ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface,
+ '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-I', 'INPUT', '-p', 'ARP', '-i', iface,
+ '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
+ '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface,
+ '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('iptables-save', '-c', '-t', 'filter'),
+ ('iptables-restore', '-c'),
+ ('iptables-save', '-c', '-t', 'mangle'),
+ ('iptables-restore', '-c'),
+ ('iptables-save', '-c', '-t', 'nat'),
+ ('iptables-restore', '-c'),
+ ('ip6tables-save', '-c', '-t', 'filter'),
+ ('ip6tables-restore', '-c'),
+ ]
+ self.assertEqual(executes, expected)
+ expected_inputs = [
+ '-A test-FORWARD -m physdev --physdev-in %s '
+ '-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
+ '-A test-FORWARD -m physdev --physdev-out %s '
+ '-d 255.255.255.255 -p udp --dport 67 -j DROP' % iface,
+ '-A test-FORWARD -m physdev --physdev-in %s '
+ '-d 192.168.1.1 -j DROP' % iface,
+ '-A test-FORWARD -m physdev --physdev-out %s '
+ '-s 192.168.1.1 -j DROP' % iface,
+ ]
+ for inp in expected_inputs:
+ self.assertTrue(inp in inputs[0])
+
+ executes = []
+ inputs = []
+
+ @classmethod
+ def fake_remove(_self, bridge, gateway):
+ return
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'remove_bridge', fake_remove)
+
+ driver.unplug(network)
+ expected = [
+ ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface,
+ '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
+ '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('iptables-save', '-c', '-t', 'filter'),
+ ('iptables-restore', '-c'),
+ ('iptables-save', '-c', '-t', 'mangle'),
+ ('iptables-restore', '-c'),
+ ('iptables-save', '-c', '-t', 'nat'),
+ ('iptables-restore', '-c'),
+ ('ip6tables-save', '-c', '-t', 'filter'),
+ ('ip6tables-restore', '-c'),
+ ]
+ self.assertEqual(executes, expected)
+ for inp in expected_inputs:
+ self.assertFalse(inp in inputs[0])
+
def _test_initialize_gateway(self, existing, expected, routes=''):
self.flags(fake_network=False)
executes = []
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 77fccd904..1552630fb 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -15,26 +15,33 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mox
import shutil
import tempfile
+import mox
+
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
+from nova import ipv6
from nova.network import linux_net
from nova.network import manager as network_manager
+from nova.network import model as net_model
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.openstack.common.rpc import common as rpc_common
import nova.policy
from nova import test
from nova.tests import fake_ldap
from nova.tests import fake_network
+from nova.tests import matchers
from nova import utils
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -136,10 +143,8 @@ class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
- self.flags(logdir=self.tempdir)
+ self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
- self.network.instance_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
@@ -168,7 +173,7 @@ class FlatNetworkTestCase(test.TestCase):
'bridge_interface': None,
'vlan': None}
- self.assertDictMatch(nw, check)
+ self.assertThat(nw, matchers.DictMatches(check))
check = {'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
@@ -180,17 +185,19 @@ class FlatNetworkTestCase(test.TestCase):
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
+ 'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
'should_create_bridge': False}
- self.assertDictMatch(info, check)
+ self.assertThat(info, matchers.DictMatches(check))
check = [{'enabled': 'DONTCARE',
'ip': '2001:db8:0:1::%x' % nid,
'netmask': 64,
'gateway': 'fe80::def'}]
- self.assertDictListMatch(info['ip6s'], check)
+ self.assertThat(info['ip6s'], matchers.DictListMatches(check))
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
@@ -198,7 +205,7 @@ class FlatNetworkTestCase(test.TestCase):
'netmask': '255.255.255.0',
'gateway': '192.168.%d.1' % nid}
for ip_num in xrange(1, num_fixed_ips + 1)]
- self.assertDictListMatch(info['ips'], check)
+ self.assertThat(info['ips'], matchers.DictListMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
@@ -295,6 +302,9 @@ class FlatNetworkTestCase(test.TestCase):
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
+ db.instance_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({'security_groups':
+ [{'id':0, 'name':'test'}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
@@ -327,6 +337,10 @@ class FlatNetworkTestCase(test.TestCase):
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
+ db.instance_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({'security_groups':
+ [{'id':0, 'name':'test'}]})
+
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
@@ -370,6 +384,17 @@ class FlatNetworkTestCase(test.TestCase):
"invalidtype",
zone1)
+ def test_mini_dns_driver_with_mixed_case(self):
+ zone1 = "example.org"
+ driver = self.network.instance_dns_manager
+ driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
+ addresses = driver.get_entries_by_address("10.0.0.10", zone1)
+ self.assertEqual(len(addresses), 1)
+ for n in addresses:
+ driver.delete_entry(n, zone1)
+ addresses = driver.get_entries_by_address("10.0.0.10", zone1)
+ self.assertEqual(len(addresses), 0)
+
def test_instance_dns(self):
fixedip = '192.168.0.101'
self.mox.StubOutWithMock(db, 'network_get')
@@ -393,6 +418,9 @@ class FlatNetworkTestCase(test.TestCase):
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
+ db.instance_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({'security_groups':
+ [{'id':0, 'name':'test'}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
@@ -645,7 +673,7 @@ class VlanNetworkTestCase(test.TestCase):
is_admin=False)
def fake1(*args, **kwargs):
- pass
+ return '10.0.0.1'
# floating ip that's already associated
def fake2(*args, **kwargs):
@@ -665,6 +693,7 @@ class VlanNetworkTestCase(test.TestCase):
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
+ 'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
@@ -675,6 +704,7 @@ class VlanNetworkTestCase(test.TestCase):
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
+ 'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
@@ -705,6 +735,7 @@ class VlanNetworkTestCase(test.TestCase):
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg(),
+ mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
@@ -714,15 +745,19 @@ class VlanNetworkTestCase(test.TestCase):
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
def fake_fixed_ip_get(context, fixed_ip_id):
- return {'instance_uuid': 'fake_uuid'}
+ return {'address': 'old', 'instance_uuid': 'fake_uuid'}
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
+ # doesn't raise because we exit early if the address is the same
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old')
+
+ # raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
- mox.IgnoreArg())
+ 'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
@@ -761,7 +796,7 @@ class VlanNetworkTestCase(test.TestCase):
def fixed_ip_get(_context, fixed_ip_id):
if fixed_ip_id == 1:
return {'address': 'fakefixed'}
- raise exception.FixedIpNotFound()
+ raise exception.FixedIpNotFound(id=fixed_ip_id)
self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
@@ -810,6 +845,7 @@ class VlanNetworkTestCase(test.TestCase):
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
+ 'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
@@ -821,6 +857,7 @@ class VlanNetworkTestCase(test.TestCase):
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
+ 'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
@@ -995,7 +1032,7 @@ class VlanNetworkTestCase(test.TestCase):
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_deleted(self):
- """Verify doesn't deallocate deleted fixed_ip from deleted network"""
+ # Verify doesn't deallocate deleted fixed_ip from deleted network.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1059,7 +1096,7 @@ class VlanNetworkTestCase(test.TestCase):
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
def test_fixed_ip_cleanup_fail(self):
- """Verify IP is not deallocated if the security group refresh fails."""
+ # Verify IP is not deallocated if the security group refresh fails.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1095,6 +1132,8 @@ class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
+ self.flags(ipv6_backend='rfc2462')
+ ipv6.reset_backend()
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None):
return None
@@ -1106,7 +1145,8 @@ class CommonNetworkTestCase(test.TestCase):
db.virtual_interface_delete_by_instance = lambda _x, _y: None
ctx = context.RequestContext('igonre', 'igonre')
- db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4')]
+ db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4',
+ network_id='ignoredid')]
manager.deallocate_for_instance(
ctx, instance_id='ignore', host='somehost')
@@ -1448,7 +1488,9 @@ class CommonNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(exception.NetworkNotFoundForUUID)
+ mox.IgnoreArg()).AndRaise(
+ exception.NetworkNotFoundForUUID(uuid='fake')
+ )
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
@@ -1483,7 +1525,9 @@ class CommonNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(exception.NetworkNotFoundForUUID)
+ mox.IgnoreArg()).AndRaise(
+ exception.NetworkNotFoundForUUID(uuid='fake')
+ )
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
@@ -1492,11 +1536,11 @@ class CommonNetworkTestCase(test.TestCase):
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
- """Dummy manager that implements RPCAllocateFixedIP"""
+ """Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
- """Tests nova.network.manager.RPCAllocateFixedIP"""
+ """Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
@@ -1523,15 +1567,30 @@ class RPCAllocateTestCase(test.TestCase):
self.assertEqual(rval, address)
+class BackdoorPortTestCase(test.TestCase):
+ """Tests nova.network.manager.get_backdoor_port."""
+ def setUp(self):
+ super(BackdoorPortTestCase, self).setUp()
+ self.manager = network_manager.NetworkManager()
+ self.manager.backdoor_port = 59697
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_backdoor_port(self):
+ port = self.manager.get_backdoor_port(self.context)
+ self.assertEqual(port, self.manager.backdoor_port)
+
+
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
- """Dummy manager that implements FloatingIP"""
+ """Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
+ self.conductor = self.start_service(
+ 'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
@@ -1545,6 +1604,7 @@ class AllocateTestCase(test.TestCase):
{'address': address,
'pool': 'nova'})
inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
@@ -1566,14 +1626,12 @@ class AllocateTestCase(test.TestCase):
class FloatingIPTestCase(test.TestCase):
- """Tests nova.network.manager.FloatingIP"""
+ """Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
- self.flags(logdir=self.tempdir)
+ self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
- self.network.floating_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
@@ -1583,6 +1641,115 @@ class FloatingIPTestCase(test.TestCase):
shutil.rmtree(self.tempdir)
super(FloatingIPTestCase, self).tearDown()
+ def test_disassociate_floating_ip_multi_host_calls(self):
+ floating_ip = {
+ 'fixed_ip_id': 12
+ }
+
+ fixed_ip = {
+ 'network_id': None,
+ 'instance_uuid': 'instance-uuid'
+ }
+
+ network = {
+ 'multi_host': True
+ }
+
+ instance = {
+ 'host': 'some-other-host'
+ }
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network.db,
+ 'floating_ip_get_by_address',
+ lambda _x, _y: floating_ip)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ self.stubs.Set(self.network.db,
+ 'fixed_ip_get',
+ lambda _x, _y: fixed_ip)
+
+ self.stubs.Set(self.network,
+ '_get_network_by_id',
+ lambda _x, _y: network)
+
+ self.stubs.Set(self.network.db,
+ 'instance_get_by_uuid',
+ lambda _x, _y: instance)
+
+ self.stubs.Set(self.network.db,
+ 'service_get_by_host_and_topic',
+ lambda _x, _y, _z: 'service')
+
+ self.stubs.Set(self.network.servicegroup_api,
+ 'service_is_up',
+ lambda _x: True)
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_disassociate_floating_ip')
+
+ self.network.network_rpcapi._disassociate_floating_ip(
+ ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
+
+ def test_associate_floating_ip_multi_host_calls(self):
+ floating_ip = {
+ 'fixed_ip_id': None
+ }
+
+ fixed_ip = {
+ 'network_id': None,
+ 'instance_uuid': 'instance-uuid'
+ }
+
+ network = {
+ 'multi_host': True
+ }
+
+ instance = {
+ 'host': 'some-other-host'
+ }
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network.db,
+ 'floating_ip_get_by_address',
+ lambda _x, _y: floating_ip)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ self.stubs.Set(self.network.db,
+ 'fixed_ip_get_by_address',
+ lambda _x, _y: fixed_ip)
+
+ self.stubs.Set(self.network,
+ '_get_network_by_id',
+ lambda _x, _y: network)
+
+ self.stubs.Set(self.network.db,
+ 'instance_get_by_uuid',
+ lambda _x, _y: instance)
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_associate_floating_ip')
+
+ self.network.network_rpcapi._associate_floating_ip(
+ ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
+ 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
+
def test_double_deallocation(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id})
@@ -1665,11 +1832,14 @@ class FloatingIPTestCase(test.TestCase):
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.mox.ReplayAll()
- floating_ip_addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_start(self.context, FAKEUUID,
- 3, self.project_id,
- 'fake_source', 'fake_dest',
- floating_ip_addresses)
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_start(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source',
+ dest='fake_dest')
self.assertEqual(called['count'], 2)
@@ -1703,11 +1873,14 @@ class FloatingIPTestCase(test.TestCase):
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
- floating_ip_addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_finish(self.context, FAKEUUID,
- 3, self.project_id,
- 'fake_source', 'fake_dest',
- floating_ip_addresses)
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_finish(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ host='fake_dest',
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source')
self.assertEqual(called['count'], 2)
@@ -1852,7 +2025,7 @@ class FloatingIPTestCase(test.TestCase):
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
- """Make sure MAC collisions are retried"""
+ # Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
@@ -1883,6 +2056,49 @@ class FloatingIPTestCase(test.TestCase):
self.network.add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
self.assertEqual(macs, [])
+ def test_deallocate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(rpc_common.ClientException,
+ self.network.deallocate_floating_ip,
+ self.context, '1.2.3.4')
+
+ def test_associate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(rpc_common.ClientException,
+ self.network.associate_floating_ip,
+ self.context, '1.2.3.4', '10.0.0.1')
+
+ def test_disassociate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(rpc_common.ClientException,
+ self.network.disassociate_floating_ip,
+ self.context, '1.2.3.4')
+
+ def test_get_floating_ip_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
+ self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
+ exception.FloatingIpNotFound(id='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(rpc_common.ClientException,
+ self.network.get_floating_ip,
+ self.context, 'fake-id')
+
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
@@ -1897,9 +2113,6 @@ class NetworkPolicyTestCase(test.TestCase):
super(NetworkPolicyTestCase, self).tearDown()
nova.policy.reset()
- def _set_rules(self, rules):
- nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
-
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
@@ -1912,16 +2125,12 @@ class NetworkPolicyTestCase(test.TestCase):
class InstanceDNSTestCase(test.TestCase):
- """Tests nova.network.manager instance DNS"""
+ """Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
- self.flags(logdir=self.tempdir)
+ self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
- self.network.instance_dns_manager = importutils.import_object(
- 'nova.network.minidns.MiniDNS')
- self.network.floating_dns_manager = importutils.import_object(
- 'nova.network.dns_driver.DNSDriver')
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
@@ -1959,11 +2168,11 @@ domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
- """Tests nova.network.ldapdns.LdapDNS"""
+ """Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
- self.stub_module('ldap', fake_ldap)
+ self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py
index c9b17306d..3e19a4461 100644
--- a/nova/tests/network/test_network_info.py
+++ b/nova/tests/network/test_network_info.py
@@ -259,6 +259,19 @@ class VIFTests(test.TestCase):
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
+ def test_create_vif_with_type(self):
+ vif_dict = dict(
+ id=1,
+ address='aa:aa:aa:aa:aa:aa',
+ network=fake_network_cache_model.new_network(),
+ type='bridge')
+ vif = fake_network_cache_model.new_vif(vif_dict)
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['type'], 'bridge')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
def test_vif_get_fixed_ips(self):
vif = fake_network_cache_model.new_vif()
fixed_ips = vif.fixed_ips()
@@ -296,6 +309,19 @@ class VIFTests(test.TestCase):
self.assertEqual(vif['network'],
fake_network_cache_model.new_network())
+ def test_hydrate_vif_with_type(self):
+ vif_dict = dict(
+ id=1,
+ address='aa:aa:aa:aa:aa:aa',
+ network=fake_network_cache_model.new_network(),
+ type='bridge')
+ vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['type'], 'bridge')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
class NetworkInfoTests(test.TestCase):
def test_create_model(self):
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index a8f29e012..f92dba443 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -15,6 +15,8 @@
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+import uuid
+
import mox
from nova import context
@@ -24,11 +26,11 @@ from nova.network import quantumv2
from nova.network.quantumv2 import api as quantumapi
from nova.openstack.common import cfg
from nova import test
-from nova import utils
from quantumclient.v2_0 import client
-FLAGS = cfg.CONF
+CONF = cfg.CONF
+
#NOTE: Quantum client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make quantum client throw a custom
@@ -92,9 +94,9 @@ class TestQuantumClient(test.TestCase):
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
- endpoint_url=FLAGS.quantum_url,
+ endpoint_url=CONF.quantum_url,
token=my_context.auth_token,
- timeout=FLAGS.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
quantumv2.get_client(my_context)
@@ -107,23 +109,17 @@ class TestQuantumClient(test.TestCase):
my_context)
def test_withouttoken_keystone_not_auth(self):
- # self.flags(quantum_auth_strategy=None) fail to work
- old_quantum_auth_strategy = FLAGS.quantum_auth_strategy
- setattr(FLAGS, 'quantum_auth_strategy', None)
+ self.flags(quantum_auth_strategy=None)
self.flags(quantum_url='http://anyhost/')
self.flags(quantum_url_timeout=30)
my_context = context.RequestContext('userid', 'my_tenantid')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
- endpoint_url=FLAGS.quantum_url,
+ endpoint_url=CONF.quantum_url,
auth_strategy=None,
- timeout=FLAGS.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
- try:
- quantumv2.get_client(my_context)
- finally:
- setattr(FLAGS, 'quantum_auth_strategy',
- old_quantum_auth_strategy)
+ quantumv2.get_client(my_context)
class TestQuantumv2(test.TestCase):
@@ -139,8 +135,9 @@ class TestQuantumv2(test.TestCase):
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
- 'uuid': str(utils.gen_uuid()),
+ 'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
+ 'availability_zone': 'nova',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
@@ -225,7 +222,7 @@ class TestQuantumv2(test.TestCase):
self.mox.UnsetStubs()
self.mox.VerifyAll()
finally:
- FLAGS.reset()
+ CONF.reset()
super(TestQuantumv2, self).tearDown()
def _verify_nw_info(self, nw_inf, index=0):
@@ -273,15 +270,21 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, i)
def test_get_instance_nw_info_1(self):
- """Test to get one port in one network and subnet."""
+ # Test to get one port in one network and subnet.
+ quantumv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
- """Test to get one port in each of two networks and subnets."""
+ # Test to get one port in each of two networks and subnets.
+ quantumv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets(self):
- """Test get instance_nw_info with networks passed in."""
+ # Test get instance_nw_info with networks passed in.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -298,6 +301,9 @@ class TestQuantumv2(test.TestCase):
network_id='my_netid1',
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
+ quantumv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
self.instance,
@@ -305,7 +311,7 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, 0)
def test_get_instance_nw_info_without_subnet(self):
- """Test get instance_nw_info for a port without subnet."""
+ # Test get instance_nw_info for a port without subnet.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -321,6 +327,9 @@ class TestQuantumv2(test.TestCase):
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': []})
+ quantumv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
@@ -391,7 +400,8 @@ class TestQuantumv2(test.TestCase):
else:
fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
- port_req_body['port']['fixed_ip'] = fixed_ip
+ port_req_body['port']['fixed_ips'] = [{'ip_address':
+ fixed_ip}]
port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
@@ -403,13 +413,21 @@ class TestQuantumv2(test.TestCase):
api.allocate_for_instance(self.context, self.instance, **kwargs)
def test_allocate_for_instance_1(self):
- """Allocate one port in one network env."""
+ # Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
- """Allocate one port in two networks env."""
+ # Allocate one port in two networks env.
self._allocate_for_instance(2)
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted, as a set.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
@@ -510,11 +528,11 @@ class TestQuantumv2(test.TestCase):
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
- """Test to deallocate in one port env."""
+ # Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
- """Test to deallocate in two ports env."""
+ # Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_validate_networks(self):
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index de3f19cea..90bffeeaf 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -19,13 +19,12 @@ Unit Tests for nova.network.rpcapi
"""
from nova import context
-from nova import flags
from nova.network import rpcapi as network_rpcapi
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class NetworkRpcAPITestCase(test.TestCase):
@@ -34,7 +33,7 @@ class NetworkRpcAPITestCase(test.TestCase):
rpcapi = network_rpcapi.NetworkAPI()
expected_retval = 'foo' if method == 'call' else None
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
- expected_topic = FLAGS.network_topic
+ expected_topic = CONF.network_topic
expected_msg = rpcapi.make_msg(method, **kwargs)
if 'source_compute' in expected_msg['args']:
# Fix up for migrate_instance_* calls.
@@ -43,16 +42,16 @@ class NetworkRpcAPITestCase(test.TestCase):
args['dest'] = args.pop('dest_compute')
targeted_methods = [
'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
- '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip',
+ '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
'_associate_floating_ip', '_disassociate_floating_ip',
- 'lease_fixed_ip', 'release_fixed_ip',
- 'migrate_instance_start', 'migrate_instance_finish',
+ 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
+ 'migrate_instance_finish', 'get_backdoor_port'
]
if method in targeted_methods and 'host' in kwargs:
if method != 'deallocate_fixed_ip':
del expected_msg['args']['host']
host = kwargs['host']
- expected_topic = rpc.queue_get_for(ctxt, FLAGS.network_topic, host)
+ expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic, host)
expected_msg['version'] = expected_version
self.fake_args = None
@@ -92,6 +91,13 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('disassociate_network', rpc_method='call',
network_uuid='fake_uuid')
+ def test_associate_host_and_project(self):
+ self._test_network_api('associate', rpc_method='call',
+ network_uuid='fake_uuid',
+ associations={'host': "testHost",
+ 'project': 'testProject'},
+ version="1.5")
+
def test_get_fixed_ip(self):
self._test_network_api('get_fixed_ip', rpc_method='call', id='id')
@@ -121,6 +127,10 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('get_instance_id_by_floating_address',
rpc_method='call', address='w.x.y.z')
+ def test_get_backdoor_port(self):
+ self._test_network_api('get_backdoor_port', rpc_method='call',
+ host='fake_host', version='1.4')
+
def test_get_vifs_by_instance(self):
self._test_network_api('get_vifs_by_instance',
rpc_method='call', instance_id='fake_id')
@@ -247,23 +257,20 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('deallocate_fixed_ip', rpc_method='call',
address='fake_addr', host='fake_host')
+ def test_update_dns(self):
+ self._test_network_api('update_dns', rpc_method='fanout_cast',
+ network_ids='fake_id', version='1.3')
+
def test__associate_floating_ip(self):
self._test_network_api('_associate_floating_ip', rpc_method='call',
floating_address='fake_addr', fixed_address='fixed_address',
- interface='fake_interface', host='fake_host')
+ interface='fake_interface', host='fake_host',
+ instance_uuid='fake_uuid', version='1.6')
def test__disassociate_floating_ip(self):
self._test_network_api('_disassociate_floating_ip', rpc_method='call',
address='fake_addr', interface='fake_interface',
- host='fake_host')
-
- def test_lease_fixed_ip(self):
- self._test_network_api('lease_fixed_ip', rpc_method='cast',
- address='fake_addr', host='fake_host')
-
- def test_release_fixed_ip(self):
- self._test_network_api('release_fixed_ip', rpc_method='cast',
- address='fake_addr', host='fake_host')
+ host='fake_host', instance_uuid='fake_uuid', version='1.6')
def test_migrate_instance_start(self):
self._test_network_api('migrate_instance_start', rpc_method='call',
diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py
new file mode 100644
index 000000000..282a28b44
--- /dev/null
+++ b/nova/tests/policy_fixture.py
@@ -0,0 +1,44 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+
+from nova.openstack.common import cfg
+from nova.openstack.common import policy as common_policy
+import nova.policy
+from nova.tests import fake_policy
+
+CONF = cfg.CONF
+
+
+class PolicyFixture(fixtures.Fixture):
+
+ def setUp(self):
+ super(PolicyFixture, self).setUp()
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ policy_file.write(fake_policy.policy_data)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
+
+ def set_rules(self, rules):
+ common_policy.set_rules(common_policy.Rules(
+ dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items())))
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index ba255c32c..652893662 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -29,38 +29,42 @@ COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_mb=512, local_gb_used=0, updated_at=None,
- service=dict(host='host1', disabled=False)),
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1'),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_mb=1024, local_gb_used=0, updated_at=None,
- service=dict(host='host2', disabled=True)),
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2'),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_mb=3072, local_gb_used=0, updated_at=None,
- service=dict(host='host3', disabled=False)),
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3'),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_mb=8192, local_gb_used=0, updated_at=None,
- service=dict(host='host4', disabled=False)),
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4'),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host1'),
+ host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host3'),
+ host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host5'),
+ host='host5', node='node5'),
]
@@ -96,22 +100,22 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
- def __init__(self, host, topic, attribute_dict):
- super(FakeHostState, self).__init__(host, topic)
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
class FakeInstance(object):
def __init__(self, context=None, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
self.context = context
i = self._create_fake_instance(params, type_name=type_name)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index be6bc3317..5d8e8236b 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -27,12 +27,12 @@ from nova import exception
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
-from nova.scheduler import least_cost
+from nova.scheduler import weights
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
-def fake_filter_hosts(hosts, filter_properties):
+def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
@@ -98,17 +98,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context, request_spec, None, None, None, None, {})
self.assertTrue(self.was_admin)
- def test_schedule_bad_topic(self):
- """Parameter checking."""
- sched = fakes.FakeFilterScheduler()
- fake_context = context.RequestContext('user', 'project')
- self.assertRaises(NotImplementedError, sched._schedule, fake_context,
- "foo", {}, {})
-
def test_scheduler_includes_launch_index(self):
fake_context = context.RequestContext('user', 'project')
- fake_kwargs = {'fake_kwarg1': 'fake_value1',
- 'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
@@ -129,9 +120,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
- self.driver._schedule(fake_context, 'compute',
- request_spec, {}, ['fake-uuid1', 'fake-uuid2']
- ).AndReturn(['host1', 'host2'])
+ self.driver._schedule(fake_context, request_spec, {},
+ ['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
fake_context, 'host1',
@@ -155,19 +145,19 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.next_weight = 1.0
- def _fake_weighted_sum(functions, hosts, options):
+ def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
- return least_cost.WeightedHost(self.next_weight,
- host_state=host_state)
+ return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
- self.stubs.Set(sched.host_manager, 'filter_hosts',
- fake_filter_hosts)
- self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
@@ -181,11 +171,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
- weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec, {})
- self.assertEquals(len(weighted_hosts), 10)
- for weighted_host in weighted_hosts:
- self.assertTrue(weighted_host.host_state is not None)
+ weighed_hosts = sched._schedule(fake_context, request_spec, {})
+ self.assertEquals(len(weighed_hosts), 10)
+ for weighed_host in weighed_hosts:
+ self.assertTrue(weighed_host.obj is not None)
def test_schedule_prep_resize_doesnt_update_host(self):
fake_context = context.RequestContext('user', 'project',
@@ -194,8 +183,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
- host_state = host_manager.HostState('host2', 'compute')
- return [least_cost.WeightedHost(1.0, host_state=host_state)]
+ host_state = host_manager.HostState('host2', 'node2')
+ return [weights.WeighedHost(host_state, 1.0)]
self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -214,19 +203,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance, {}, None)
self.assertEqual(info['called'], 0)
- def test_get_cost_functions(self):
- fixture = fakes.FakeFilterScheduler()
- fns = fixture.get_cost_functions()
- self.assertEquals(len(fns), 1)
- weight, fn = fns[0]
- self.assertEquals(weight, -1.0)
- hostinfo = host_manager.HostState('host', 'compute')
- hostinfo.update_from_compute_node(dict(memory_mb=1000,
- local_gb=0, vcpus=1, disk_available_least=1000,
- free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
- local_gb_used=0, updated_at=None))
- self.assertEquals(872, fn(hostinfo, {}))
-
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
@@ -240,7 +216,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
- """Retry info should not get populated when re-scheduling is off"""
+ # Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
@@ -248,14 +224,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertFalse("retry" in filter_properties)
def test_retry_attempt_one(self):
- """Test retry logic on initial scheduling attempt"""
+ # Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -263,14 +239,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
- """Test retry logic when re-scheduling"""
+ # Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -280,14 +256,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
- """Test for necessary explosion when max retries is exceeded"""
+ # Test for necessary explosion when max retries is exceeded.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -298,37 +274,39 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
- 'compute', request_spec, filter_properties=filter_properties)
+ request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
+ node = "fakenode"
sched = fakes.FakeFilterScheduler()
- sched._add_retry_host(filter_properties, host)
+ sched._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
- self.assertEqual(host, hosts[0])
+ self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
- """Test addition of certain filter props after a host is selected"""
+ # Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
- host_state = host_manager.HostState('host', 'compute')
+ host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
sched._post_select_populate_filter_properties(filter_properties,
host_state)
- self.assertEqual('host', filter_properties['retry']['hosts'][0])
+ self.assertEqual(['host', 'node'],
+ filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpus': 5}, host_state.limits)
def test_prep_resize_post_populates_retry(self):
- """Prep resize should add a 'host' entry to the retry dict"""
+ # Prep resize should add a ('host', 'node') entry to the retry dict.
sched = fakes.FakeFilterScheduler()
image = 'image'
@@ -342,21 +320,22 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
reservations = None
- host = fakes.FakeHostState('host', 'compute', {})
- weighted_host = least_cost.WeightedHost(1, host)
- hosts = [weighted_host]
+ host = fakes.FakeHostState('host', 'node', {})
+ weighed_host = weights.WeighedHost(host, 1)
+ weighed_hosts = [weighed_host]
self.mox.StubOutWithMock(sched, '_schedule')
self.mox.StubOutWithMock(sched.compute_rpcapi, 'prep_resize')
- sched._schedule(self.context, 'compute', request_spec,
- filter_properties, [instance['uuid']]).AndReturn(hosts)
+ sched._schedule(self.context, request_spec, filter_properties,
+ [instance['uuid']]).AndReturn(weighed_hosts)
sched.compute_rpcapi.prep_resize(self.context, image, instance,
instance_type, 'host', reservations, request_spec=request_spec,
- filter_properties=filter_properties)
+ filter_properties=filter_properties, node='node')
self.mox.ReplayAll()
sched.schedule_prep_resize(self.context, image, request_spec,
filter_properties, instance, instance_type, reservations)
- self.assertEqual(['host'], filter_properties['retry']['hosts'])
+ self.assertEqual([['host', 'node']],
+ filter_properties['retry']['hosts'])
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index e789f169e..9f7f189cc 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -20,40 +20,17 @@ import stubout
from nova import context
from nova import db
-from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
from nova.scheduler.filters.trusted_filter import AttestationService
+from nova import servicegroup
from nova import test
from nova.tests.scheduler import fakes
-from nova import utils
-
-DATA = ''
-
-
-def stub_out_https_backend(stubs):
- """
- Stubs out the httplib.HTTPRequest.getresponse to return
- faked-out data instead of grabbing actual contents of a resource
-
- The stubbed getresponse() returns an iterator over
- the data "I am a teapot, short and stout\n"
-
- :param stubs: Set of stubout stubs
- """
-
- class FakeHTTPResponse(object):
-
- def read(self):
- return DATA
-
- def fake_do_request(self, *args, **kwargs):
- return httplib.OK, FakeHTTPResponse()
-
- stubs.Set(AttestationService, '_do_request', fake_do_request)
+CONF = cfg.CONF
+CONF.import_opt('my_ip', 'nova.netconf')
class TestFilter(filters.BaseHostFilter):
@@ -61,7 +38,7 @@ class TestFilter(filters.BaseHostFilter):
class TestBogusFilter(object):
- """Class that doesn't inherit from BaseHostFilter"""
+ """Class that doesn't inherit from BaseHostFilter."""
pass
@@ -254,54 +231,60 @@ class ExtraSpecsOpsTestCase(test.TestCase):
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
+ def fake_oat_request(self, *args, **kwargs):
+ """Stubs out the response from OAT service."""
+ return httplib.OK, jsonutils.loads(self.oat_data)
+
def setUp(self):
super(HostFiltersTestCase, self).setUp()
+ self.oat_data = ''
self.stubs = stubout.StubOutForTesting()
- stub_out_https_backend(self.stubs)
+ self.stubs.Set(AttestationService, '_request', self.fake_oat_request)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
- # This has a side effect of testing 'get_filter_classes'
- # when specifying a method (in this case, our standard filters)
- classes = filters.get_filter_classes(
- ['nova.scheduler.filters.standard_filters'])
+ filter_handler = filters.HostFilterHandler()
+ classes = filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.all_filters'])
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
- def test_get_filter_classes(self):
- classes = filters.get_filter_classes(
- ['nova.tests.scheduler.test_host_filters.TestFilter'])
- self.assertEqual(len(classes), 1)
- self.assertEqual(classes[0].__name__, 'TestFilter')
- # Test a specific class along with our standard filters
- classes = filters.get_filter_classes(
- ['nova.tests.scheduler.test_host_filters.TestFilter',
- 'nova.scheduler.filters.standard_filters'])
- self.assertEqual(len(classes), 1 + len(self.class_map))
-
- def test_get_filter_classes_raises_on_invalid_classes(self):
- self.assertRaises(ImportError,
- filters.get_filter_classes,
- ['nova.tests.scheduler.test_host_filters.NoExist'])
- self.assertRaises(exception.ClassNotFound,
- filters.get_filter_classes,
- ['nova.tests.scheduler.test_host_filters.TestBogusFilter'])
+ def test_standard_filters_is_deprecated(self):
+ info = {'called': False}
+
+ def _fake_deprecated(*args, **kwargs):
+ info['called'] = True
+
+ self.stubs.Set(filters.LOG, 'deprecated', _fake_deprecated)
+
+ filter_handler = filters.HostFilterHandler()
+ filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.standard_filters'])
+
+ self.assertTrue(info['called'])
+ self.assertIn('AllHostsFilter', self.class_map)
+ self.assertIn('ComputeFilter', self.class_map)
+
+ def test_all_filters(self):
+ # Double check at least a couple of known filters exist
+ self.assertIn('AllHostsFilter', self.class_map)
+ self.assertIn('ComputeFilter', self.class_map)
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
- def fake_service_is_up(service):
- return ret_value
- self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
+ def fake_service_is_up(self, service):
+ return ret_value
+ self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -314,7 +297,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -327,7 +310,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -340,7 +323,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -352,7 +335,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -365,7 +348,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -378,7 +361,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -391,7 +374,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -403,7 +386,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -417,7 +400,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -431,9 +414,9 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
- affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
+ affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
@@ -448,7 +431,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -464,7 +447,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since empty
@@ -490,7 +473,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
@@ -509,7 +492,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -521,7 +504,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -533,7 +516,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -547,7 +530,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -560,7 +543,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -574,7 +557,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -589,7 +572,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -600,7 +583,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -611,7 +594,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -622,33 +605,11 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_on_volume(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeFilter']()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- capabilities = {'enabled': False}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'volume',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_compute_filter_passes_on_no_instance_type(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeFilter']()
- filter_properties = {}
- capabilities = {'enabled': False}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
def test_image_properties_filter_passes_same_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
@@ -659,7 +620,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -673,7 +634,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -686,7 +647,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -699,7 +660,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -710,7 +671,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -722,7 +683,7 @@ class HostFiltersTestCase(test.TestCase):
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -734,7 +695,7 @@ class HostFiltersTestCase(test.TestCase):
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
@@ -752,6 +713,27 @@ class HostFiltersTestCase(test.TestCase):
especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
passes=False)
+ def test_compute_filter_pass_extra_specs_simple_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'capabilities:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'wrong_scope:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': {'a': '1', 'b': {'aa': '2'}}, 'opt2': '2'},
+ especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
def test_aggregate_filter_passes_no_extra_specs(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
@@ -759,18 +741,21 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def _create_aggregate_with_host(self, name='fake_aggregate',
metadata=None,
hosts=['host1']):
- values = {'name': name,
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': name}
+ if metadata:
+ metadata['availability_zone'] = 'fake_avail_zone'
+ else:
+ metadata = {'availability_zone': 'fake_avail_zone'}
result = db.aggregate_create(self.context.elevated(), values, metadata)
for host in hosts:
- db.aggregate_host_add(self.context.elevated(), result.id, host)
+ db.aggregate_host_add(self.context.elevated(), result['id'], host)
return result
def _do_test_aggregate_filter_extra_specs(self, emeta, especs, passes):
@@ -779,7 +764,8 @@ class HostFiltersTestCase(test.TestCase):
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
@@ -793,8 +779,9 @@ class HostFiltersTestCase(test.TestCase):
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
- db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
+ db.aggregate_host_delete(self.context.elevated(), agg2['id'], 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_passes_extra_specs_simple(self):
@@ -819,7 +806,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -830,7 +817,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -841,7 +828,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -852,7 +839,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
@@ -862,7 +849,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -874,7 +861,7 @@ class HostFiltersTestCase(test.TestCase):
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
@@ -887,7 +874,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -900,7 +887,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
@@ -917,7 +904,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -934,14 +921,14 @@ class HostFiltersTestCase(test.TestCase):
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
- """Test json filter more thoroughly"""
+ # Test json filter more thoroughly.
filt_cls = self.class_map['JsonFilter']()
raw = ['and',
'$capabilities.enabled',
@@ -962,7 +949,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
@@ -972,7 +959,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -982,7 +969,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'instance_type',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -992,7 +979,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
@@ -1002,7 +989,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1012,7 +999,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1021,7 +1008,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
@@ -1090,14 +1077,14 @@ class HostFiltersTestCase(test.TestCase):
'query': jsonutils.dumps(raw),
},
}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = []
@@ -1117,7 +1104,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1138,7 +1125,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
@@ -1161,72 +1148,72 @@ class HostFiltersTestCase(test.TestCase):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
- global DATA
- DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
+ self.oat_data =\
+ '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
- global DATA
- DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
+ self.oat_data =\
+ '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
- global DATA
- DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
+ self.oat_data =\
+ '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
- global DATA
- DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
+ self.oat_data =\
+ '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1246,43 +1233,49 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
- """Test case where retry/re-scheduling is disabled"""
+ # Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
- """Host not previously tried"""
+ # Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
- retry = dict(num_attempts=1, hosts=['host2', 'host3'])
+ host = fakes.FakeHostState('host1', 'nodeX', {})
+ retry = dict(num_attempts=2,
+ hosts=[['host1', 'node1'], # same host, different node
+ ['host2', 'node2'], # different host and node
+ ])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
- """Host was already tried"""
+ # Node was already tried.
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
- retry = dict(num_attempts=1, hosts=['host3', 'host1'])
+ host = fakes.FakeHostState('host1', 'node1', {})
+ retry = dict(num_attempts=1,
+ hosts=[['host1', 'node1']])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1290,13 +1283,13 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 8})
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1304,7 +1297,7 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 4d1e00852..ae7774bac 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -15,118 +15,215 @@
"""
Tests For HostManager
"""
-
-
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.openstack.common import timeutils
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova import test
+from nova.tests import matchers
from nova.tests.scheduler import fakes
-class ComputeFilterClass1(object):
- def host_passes(self, *args, **kwargs):
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
pass
-class ComputeFilterClass2(object):
- def host_passes(self, *args, **kwargs):
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
pass
class HostManagerTestCase(test.TestCase):
- """Test case for HostManager class"""
+ """Test case for HostManager class."""
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
+ 'fake-node') for x in xrange(1, 5)]
+
+ def tearDown(self):
+ timeutils.clear_time_override()
+ super(HostManagerTestCase, self).tearDown()
def test_choose_host_filters_not_found(self):
- self.flags(scheduler_default_filters='ComputeFilterClass3')
- self.host_manager.filter_classes = [ComputeFilterClass1,
- ComputeFilterClass2]
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
- self.flags(scheduler_default_filters=['ComputeFilterClass2'])
- self.host_manager.filter_classes = [ComputeFilterClass1,
- ComputeFilterClass2]
-
- # Test 'compute' returns 1 correct function
- filter_fns = self.host_manager._choose_host_filters(None)
- self.assertEqual(len(filter_fns), 1)
- self.assertEqual(filter_fns[0].__func__,
- ComputeFilterClass2.host_passes.__func__)
-
- def test_filter_hosts(self):
- topic = 'fake_topic'
-
- filters = ['fake-filter1', 'fake-filter2']
- fake_host1 = host_manager.HostState('host1', topic)
- fake_host2 = host_manager.HostState('host2', topic)
- hosts = [fake_host1, fake_host2]
- filter_properties = {'fake_prop': 'fake_val'}
-
- self.mox.StubOutWithMock(self.host_manager,
- '_choose_host_filters')
- self.mox.StubOutWithMock(fake_host1, 'passes_filters')
- self.mox.StubOutWithMock(fake_host2, 'passes_filters')
-
- self.host_manager._choose_host_filters(None).AndReturn(filters)
- fake_host1.passes_filters(filters, filter_properties).AndReturn(
- False)
- fake_host2.passes_filters(filters, filter_properties).AndReturn(
- True)
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+ # Test we returns 1 correct function
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_classes), 1)
+ self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(result), set(info['got_objs']))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specificed_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore_and_force(self):
+ # Ensure ignore_hosts processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
- filtered_hosts = self.host_manager.filter_hosts(hosts,
- filter_properties, filters=None)
- self.assertEqual(len(filtered_hosts), 1)
- self.assertEqual(filtered_hosts[0], fake_host2)
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
- self.assertDictMatch(service_states, {})
+ self.assertEqual(len(service_states.keys()), 0)
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
- timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
- timestamp=1)
- host1_volume_capabs = dict(free_disk=4321, timestamp=1)
- host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+ timestamp=1, hypervisor_hostname='node1')
+ host2_compute_capabs = dict(free_memory=8756, timestamp=1,
+ hypervisor_hostname='node2')
self.mox.ReplayAll()
self.host_manager.update_service_capabilities('compute', 'host1',
host1_compute_capabs)
- self.host_manager.update_service_capabilities('volume', 'host1',
- host1_volume_capabs)
self.host_manager.update_service_capabilities('compute', 'host2',
host2_compute_capabs)
- # Make sure dictionary isn't re-assigned
- self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_compute_capabs['timestamp'], 1)
host1_compute_capabs['timestamp'] = 31337
- host1_volume_capabs['timestamp'] = 31338
host2_compute_capabs['timestamp'] = 31339
- expected = {'host1': {'compute': host1_compute_capabs,
- 'volume': host1_volume_capabs},
- 'host2': {'compute': host2_compute_capabs}}
- self.assertDictMatch(service_states, expected)
+ expected = {('host1', 'node1'): host1_compute_capabs,
+ ('host2', 'node2'): host2_compute_capabs}
+ self.assertThat(service_states, matchers.DictMatches(expected))
+
+ def test_update_service_capabilities_node_key(self):
+ service_states = self.host_manager.service_states
+ self.assertThat(service_states, matchers.DictMatches({}))
+
+ host1_cap = {'hypervisor_hostname': 'host1-hvhn'}
+ host2_cap = {}
+
+ timeutils.set_time_override(31337)
+ self.host_manager.update_service_capabilities('compute', 'host1',
+ host1_cap)
+ timeutils.set_time_override(31338)
+ self.host_manager.update_service_capabilities('compute', 'host2',
+ host2_cap)
+ host1_cap['timestamp'] = 31337
+ host2_cap['timestamp'] = 31338
+ expected = {('host1', 'host1-hvhn'): host1_cap,
+ ('host2', None): host2_cap}
+ self.assertThat(service_states, matchers.DictMatches(expected))
def test_get_all_host_states(self):
context = 'fake_context'
- topic = 'compute'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
@@ -136,7 +233,7 @@ class HostManagerTestCase(test.TestCase):
host_manager.LOG.warn("No service for compute ID 5")
self.mox.ReplayAll()
- self.host_manager.get_all_host_states(context, topic)
+ self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
@@ -144,113 +241,38 @@ class HostManagerTestCase(test.TestCase):
for i in xrange(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
- self.assertEqual(host_states_map[host].service,
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
compute_node['service'])
- self.assertEqual(host_states_map['host1'].free_ram_mb, 512)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
# 511GB
- self.assertEqual(host_states_map['host1'].free_disk_mb, 524288)
- self.assertEqual(host_states_map['host2'].free_ram_mb, 1024)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
# 1023GB
- self.assertEqual(host_states_map['host2'].free_disk_mb, 1048576)
- self.assertEqual(host_states_map['host3'].free_ram_mb, 3072)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
# 3071GB
- self.assertEqual(host_states_map['host3'].free_disk_mb, 3145728)
- self.assertEqual(host_states_map['host4'].free_ram_mb, 8192)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
# 8191GB
- self.assertEqual(host_states_map['host4'].free_disk_mb, 8388608)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
class HostStateTestCase(test.TestCase):
- """Test case for HostState class"""
+ """Test case for HostState class."""
# update_from_compute_node() and consume_from_instance() are tested
# in HostManagerTestCase.test_get_all_host_states()
- def test_host_state_passes_filters_passes(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(True)
- cls2.host_passes(fake_host, filter_properties).AndReturn(True)
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
- def test_host_state_passes_filters_passes_with_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'ignore_hosts': ['host2']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(True)
- cls2.host_passes(fake_host, filter_properties).AndReturn(True)
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
- def test_host_state_passes_filters_fails(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(False)
- # cls2.host_passes() not called because of short circuit
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertFalse(result)
-
- def test_host_state_passes_filters_fails_from_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'ignore_hosts': ['host1']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- # cls[12].host_passes() not called because of short circuit
- # with matching host to ignore
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertFalse(result)
-
- def test_host_state_passes_filters_skipped_from_force(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'force_hosts': ['host1']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- # cls[12].host_passes() not called because of short circuit
- # with matching host to force
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
def test_stat_consumption_from_compute_node(self):
stats = [
dict(key='num_instances', value='5'),
@@ -268,7 +290,7 @@ class HostStateTestCase(test.TestCase):
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
updated_at=None)
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost", "fakenode")
host.update_from_compute_node(compute)
self.assertEqual(5, host.num_instances)
@@ -283,7 +305,7 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance(self):
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost", "fakenode")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index 64cda0b2a..d159d8f4c 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011-2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,38 +16,92 @@
Tests For Least Cost functions.
"""
from nova import context
-from nova.scheduler import host_manager
-from nova.scheduler import least_cost
+from nova.openstack.common import cfg
+from nova.scheduler import weights
+from nova.scheduler.weights import least_cost
from nova import test
from nova.tests.scheduler import fakes
-def offset(hostinfo, options):
+test_least_cost_opts = [
+ cfg.FloatOpt('compute_fake_weigher1_weight',
+ default=2.0,
+ help='How much weight to give the fake_weigher1 function'),
+ cfg.FloatOpt('compute_fake_weigher2_weight',
+ default=1.0,
+ help='How much weight to give the fake_weigher2 function'),
+ ]
+
+CONF = cfg.CONF
+CONF.import_opt('least_cost_functions', 'nova.scheduler.weights.least_cost')
+CONF.import_opt('compute_fill_first_cost_fn_weight',
+ 'nova.scheduler.weights.least_cost')
+CONF.register_opts(test_least_cost_opts)
+
+
+def compute_fake_weigher1(hostinfo, options):
return hostinfo.free_ram_mb + 10000
-def scale(hostinfo, options):
+def compute_fake_weigher2(hostinfo, options):
return hostinfo.free_ram_mb * 2
class LeastCostTestCase(test.TestCase):
def setUp(self):
super(LeastCostTestCase, self).setUp()
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ weigher_classes = least_cost.get_least_cost_weighers()
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(weigher_classes,
+ hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
fakes.mox_host_manager_db_calls(self.mox, ctxt)
self.mox.ReplayAll()
- host_states = self.host_manager.get_all_host_states(ctxt,
- 'compute')
+ host_states = self.host_manager.get_all_host_states(ctxt)
self.mox.VerifyAll()
self.mox.ResetAll()
return host_states
- def test_weighted_sum_happy_day(self):
- fn_tuples = [(1.0, offset), (1.0, scale)]
+ def test_default_of_spread_first(self):
+ # Default modifier is -1.0, so it turns out that hosts with
+ # the most free memory win
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192)
+ self.assertEqual(weighed_host.obj.host, 'host4')
+
+ def test_filling_first(self):
+ self.flags(compute_fill_first_cost_fn_weight=1.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=-512
+ # host2: free_ram_mb=-1024
+ # host3: free_ram_mb=-3072
+ # host4: free_ram_mb=-8192
+
+ # so, host1 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, -512)
+ self.assertEqual(weighed_host.obj.host, 'host1')
+
+ def test_weighted_sum_provided_method(self):
+ fns = ['nova.tests.scheduler.test_least_cost.compute_fake_weigher1',
+ 'nova.tests.scheduler.test_least_cost.compute_fake_weigher2']
+ self.flags(least_cost_functions=fns)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
@@ -59,18 +113,17 @@ class LeastCostTestCase(test.TestCase):
# [10512, 11024, 13072, 18192]
# [1024, 2048, 6144, 16384]
- # adjusted [ 1.0 * x + 1.0 * y] =
- # [11536, 13072, 19216, 34576]
+ # adjusted [ 2.0 * x + 1.0 * y] =
+ # [22048, 24096, 32288, 52768]
# so, host1 should win:
- options = {}
- weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
- options)
- self.assertEqual(weighted_host.weight, 11536)
- self.assertEqual(weighted_host.host_state.host, 'host1')
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 52768)
+ self.assertEqual(weighed_host.obj.host, 'host4')
def test_weighted_sum_single_function(self):
- fn_tuples = [(1.0, offset), ]
+ fns = ['nova.tests.scheduler.test_least_cost.compute_fake_weigher1']
+ self.flags(least_cost_functions=fns)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=0
@@ -80,24 +133,10 @@ class LeastCostTestCase(test.TestCase):
# [offset, ]=
# [10512, 11024, 13072, 18192]
+ # adjusted [ 2.0 * x ]=
+ # [21024, 22048, 26144, 36384]
# so, host1 should win:
- options = {}
- weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
- options)
- self.assertEqual(weighted_host.weight, 10512)
- self.assertEqual(weighted_host.host_state.host, 'host1')
-
-
-class TestWeightedHost(test.TestCase):
- def test_dict_conversion_without_host_state(self):
- host = least_cost.WeightedHost('someweight')
- expected = {'weight': 'someweight'}
- self.assertDictMatch(host.to_dict(), expected)
-
- def test_dict_conversion_with_host_state(self):
- host_state = host_manager.HostState('somehost', 'sometopic')
- host = least_cost.WeightedHost('someweight', host_state)
- expected = {'weight': 'someweight',
- 'host': 'somehost'}
- self.assertDictMatch(host.to_dict(), expected)
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 36384)
+ self.assertEqual(weighed_host.obj.host, 'host4')
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index ee9e0bbd3..5642c4e17 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -45,7 +45,7 @@ class FakeDefaultScheduler(driver.Scheduler):
class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
- """Test case for multi driver"""
+ """Test case for multi driver."""
driver_cls = multi.MultiScheduler
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index 8cf741118..e9a1680a8 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -19,13 +19,12 @@ Unit Tests for nova.scheduler.rpcapi
"""
from nova import context
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class SchedulerRpcAPITestCase(test.TestCase):
@@ -37,6 +36,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = expected_version
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
+
self.fake_args = None
self.fake_kwargs = None
@@ -51,7 +53,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
- expected_args = [ctxt, FLAGS.scheduler_topic, expected_msg]
+ expected_args = [ctxt, CONF.scheduler_topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
@@ -82,4 +84,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
def test_update_service_capabilities(self):
self._test_scheduler_api('update_service_capabilities',
rpc_method='fanout_cast', service_name='fake_name',
- host='fake_host', capabilities='fake_capabilities')
+ host='fake_host', capabilities='fake_capabilities',
+ version='2.4')
+
+ def test_get_backdoor_port(self):
+ self._test_scheduler_api('get_backdoor_port', rpc_method='call',
+ host='fake_host', version='2.5')
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 6a0e93b7d..dd5b0ae32 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -29,21 +29,18 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
-from nova.openstack.common import timeutils
from nova.scheduler import driver
from nova.scheduler import manager
+from nova import servicegroup
from nova import test
+from nova.tests import matchers
from nova.tests.scheduler import fakes
-from nova import utils
-
-FLAGS = flags.FLAGS
class SchedulerManagerTestCase(test.TestCase):
- """Test case for scheduler manager"""
+ """Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
@@ -89,16 +86,38 @@ class SchedulerManagerTestCase(test.TestCase):
service_name=service_name, host=host,
capabilities=capabilities)
+ def test_update_service_multiple_capabilities(self):
+ service_name = 'fake_service'
+ host = 'fake_host'
+
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'update_service_capabilities')
+
+ capab1 = {'fake_capability': 'fake_value1'},
+ capab2 = {'fake_capability': 'fake_value2'},
+ capab3 = None
+ self.manager.driver.update_service_capabilities(
+ service_name, host, capab1)
+ self.manager.driver.update_service_capabilities(
+ service_name, host, capab2)
+ # None is converted to {}
+ self.manager.driver.update_service_capabilities(
+ service_name, host, {})
+ self.mox.ReplayAll()
+ self.manager.update_service_capabilities(self.context,
+ service_name=service_name, host=host,
+ capabilities=[capab1, capab2, capab3])
+
def test_show_host_resources(self):
host = 'fake_host'
- computes = [{'host': host,
- 'compute_node': [{'vcpus': 4,
- 'vcpus_used': 2,
- 'memory_mb': 1024,
- 'memory_mb_used': 512,
- 'local_gb': 1024,
- 'local_gb_used': 512}]}]
+ compute_node = {'host': host,
+ 'compute_node': [{'vcpus': 4,
+ 'vcpus_used': 2,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 512,
+ 'local_gb': 1024,
+ 'local_gb_used': 512}]}
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
@@ -115,11 +134,11 @@ class SchedulerManagerTestCase(test.TestCase):
'root_gb': 256,
'ephemeral_gb': 0}]
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.service_get_all_compute_by_host(self.context, host).AndReturn(
- computes)
+ db.service_get_by_compute_host(self.context, host).AndReturn(
+ compute_node)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
@@ -138,7 +157,7 @@ class SchedulerManagerTestCase(test.TestCase):
'local_gb_used': 512,
'memory_mb': 1024,
'memory_mb_used': 512}}
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
def _mox_schedule_method_helper(self, method_name):
# Make sure the method exists that we're going to test call
@@ -249,7 +268,7 @@ class SchedulerManagerTestCase(test.TestCase):
class SchedulerTestCase(test.TestCase):
- """Test case for base scheduler driver class"""
+ """Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
@@ -260,6 +279,7 @@ class SchedulerTestCase(test.TestCase):
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
+ self.servicegroup_api = servicegroup.API()
def test_update_service_capabilities(self):
service_name = 'fake_service'
@@ -281,12 +301,12 @@ class SchedulerTestCase(test.TestCase):
services = [service1, service2]
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
- self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
db.service_get_all_by_topic(self.context,
self.topic).AndReturn(services)
- utils.service_is_up(service1).AndReturn(False)
- utils.service_is_up(service2).AndReturn(True)
+ self.servicegroup_api.service_is_up(service1).AndReturn(False)
+ self.servicegroup_api.service_is_up(service2).AndReturn(True)
self.mox.ReplayAll()
result = self.driver.hosts_up(self.context, self.topic)
@@ -305,7 +325,7 @@ class SchedulerTestCase(test.TestCase):
'task_state': ''}
def test_live_migration_basic(self):
- """Test basic schedule_live_migration functionality"""
+ # Test basic schedule_live_migration functionality.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
@@ -318,8 +338,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -339,10 +357,10 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_all_checks_pass(self):
- """Test live migration when all checks pass."""
+ # Test live migration when all checks pass.
- self.mox.StubOutWithMock(utils, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
@@ -353,34 +371,32 @@ class SchedulerTestCase(test.TestCase):
block_migration = True
disk_over_commit = True
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
# Source checks
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
- utils.service_is_up('fake_service2').AndReturn(True)
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
+ self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
# Destination checks (compute is up, enough memory, disk)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
- utils.service_is_up('fake_service3').AndReturn(True)
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
# assert_compute_node_has_enough_memory()
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'memory_mb': 2048,
- 'hypervisor_version': 1}]}])
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'memory_mb': 2048,
+ 'hypervisor_version': 1}]})
db.instance_get_all_by_host(self.context, dest).AndReturn(
[dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1,
- 'cpu_info': 'fake_cpu_info'}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]})
rpc.call(self.context, "compute.fake_host2",
{"method": 'check_can_live_migrate_destination',
@@ -402,7 +418,7 @@ class SchedulerTestCase(test.TestCase):
self.assertEqual(result, None)
def test_live_migration_instance_not_running(self):
- """The instance given by instance_id is not running."""
+ # The instance given by instance_id is not running.
dest = 'fake_host2'
block_migration = False
@@ -417,10 +433,10 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
- """Raise exception when src compute node is does not exist."""
+ # Raise exception when the src compute node does not exist.
- self.mox.StubOutWithMock(utils, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -428,9 +444,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context,
instance['host']).AndRaise(
- exception.NotFound())
+ exception.ComputeHostNotFound(host='fake'))
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -440,10 +456,10 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
- """Raise exception when src compute node is not alive."""
+ # Raise exception when src compute node is not alive.
- self.mox.StubOutWithMock(utils, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -451,9 +467,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
- utils.service_is_up('fake_service2').AndReturn(False)
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
+ self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -463,11 +479,11 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_alive(self):
- """Raise exception when dest compute node is not alive."""
+ # Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
- self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
dest = 'fake_host2'
block_migration = False
@@ -475,10 +491,10 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
# Compute is down
- utils.service_is_up('fake_service3').AndReturn(False)
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -488,22 +504,20 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
- """Confirms exception raises in case dest and src is same host."""
+ # Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
- self.mox.StubOutWithMock(utils, 'service_is_up')
-
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
block_migration = False
- disk_over_commit = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
- utils.service_is_up('fake_service3').AndReturn(True)
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.UnableToMigrateToSelf,
@@ -513,11 +527,11 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
- """Confirms exception raises when dest doesn't have enough memory."""
+ # Confirms exception raises when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
- self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
@@ -527,9 +541,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
- utils.service_is_up('fake_service3').AndReturn(True)
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
{'memory_mb': 2048})
@@ -544,13 +558,13 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
- """Confirm live_migration to hypervisor of different type raises"""
+ # Confirm live_migration to hypervisor of different type raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -560,13 +574,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'not-xen',
- 'hypervisor_version': 1}]}])
+ {'compute_node': [{'hypervisor_type': 'not-xen',
+ 'hypervisor_version': 1}]})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
@@ -576,13 +590,13 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_hypervisor_version_older_raises(self):
- """Confirm live migration to older hypervisor raises"""
+ # Confirm live migration to older hypervisor raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -592,13 +606,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 2}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 2}]})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,
@@ -635,93 +649,19 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
class SchedulerDriverModuleTestCase(test.TestCase):
- """Test case for scheduler driver module methods"""
+ """Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
- def test_cast_to_compute_host_update_db_with_instance_uuid(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'instance_uuid': 'fake_uuid',
- 'extra_arg': 'meow'}
- queue = 'fake_queue'
-
- self.mox.StubOutWithMock(timeutils, 'utcnow')
- self.mox.StubOutWithMock(db, 'instance_update')
- self.mox.StubOutWithMock(rpc, 'queue_get_for')
- self.mox.StubOutWithMock(rpc, 'cast')
-
- timeutils.utcnow().AndReturn('fake-now')
- db.instance_update(self.context, 'fake_uuid',
- {'host': None, 'scheduled_at': 'fake-now'})
- rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue)
- rpc.cast(self.context, queue,
- {'method': method,
- 'args': fake_kwargs})
-
- self.mox.ReplayAll()
- driver.cast_to_compute_host(self.context, host, method,
- **fake_kwargs)
-
- def test_cast_to_compute_host_update_db_without_instance_uuid(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'extra_arg': 'meow'}
- queue = 'fake_queue'
-
- self.mox.StubOutWithMock(rpc, 'queue_get_for')
- self.mox.StubOutWithMock(rpc, 'cast')
-
- rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue)
- rpc.cast(self.context, queue,
- {'method': method,
- 'args': fake_kwargs})
-
- self.mox.ReplayAll()
- driver.cast_to_compute_host(self.context, host, method,
- **fake_kwargs)
-
- def test_cast_to_host_compute_topic(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'extra_arg': 'meow'}
-
- self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
- driver.cast_to_compute_host(self.context, host, method,
- **fake_kwargs)
-
- self.mox.ReplayAll()
- driver.cast_to_host(self.context, 'compute', host, method,
- **fake_kwargs)
-
- def test_cast_to_host_unknown_topic(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'extra_arg': 'meow'}
- topic = 'unknown'
- queue = 'fake_queue'
-
- self.mox.StubOutWithMock(rpc, 'queue_get_for')
- self.mox.StubOutWithMock(rpc, 'cast')
-
- rpc.queue_get_for(self.context, topic, host).AndReturn(queue)
- rpc.cast(self.context, queue,
- {'method': method,
- 'args': fake_kwargs})
-
- self.mox.ReplayAll()
- driver.cast_to_host(self.context, topic, host, method,
- **fake_kwargs)
-
def test_encode_instance(self):
instance = {'id': 31337,
'test_arg': 'meow'}
result = driver.encode_instance(instance, True)
expected = {'id': instance['id'], '_is_precooked': False}
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
@@ -729,6 +669,6 @@ class SchedulerDriverModuleTestCase(test.TestCase):
expected = {}
expected.update(instance)
expected['_is_precooked'] = True
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py
new file mode 100644
index 000000000..8699ed811
--- /dev/null
+++ b/nova/tests/scheduler/test_weights.py
@@ -0,0 +1,117 @@
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler weights.
+"""
+
+from nova import context
+from nova.scheduler import weights
+from nova import test
+from nova.tests import matchers
+from nova.tests.scheduler import fakes
+
+
+class TestWeighedHost(test.TestCase):
+ def test_dict_conversion(self):
+ host_state = fakes.FakeHostState('somehost', None, {})
+ host = weights.WeighedHost(host_state, 'someweight')
+ expected = {'weight': 'someweight',
+ 'host': 'somehost'}
+ self.assertThat(host.to_dict(), matchers.DictMatches(expected))
+
+ def test_all_weighers(self):
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('RAMWeigher', class_names)
+
+ def test_all_weighers_with_deprecated_config1(self):
+ self.flags(compute_fill_first_cost_fn_weight=-1.0)
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('_LeastCostWeigher', class_names)
+
+ def test_all_weighers_with_deprecated_config2(self):
+ self.flags(least_cost_functions=['something'])
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('_LeastCostWeigher', class_names)
+
+
+class RamWeigherTestCase(test.TestCase):
+ def setUp(self):
+ super(RamWeigherTestCase, self).setUp()
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.ram.RAMWeigher'])
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ fakes.mox_host_manager_db_calls(self.mox, ctxt)
+ self.mox.ReplayAll()
+ host_states = self.host_manager.get_all_host_states(ctxt)
+ self.mox.VerifyAll()
+ self.mox.ResetAll()
+ return host_states
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192)
+ self.assertEqual(weighed_host.obj.host, 'host4')
+
+ def test_ram_filter_multiplier1(self):
+ self.flags(ram_weight_multiplier=-1.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=-512
+ # host2: free_ram_mb=-1024
+ # host3: free_ram_mb=-3072
+ # host4: free_ram_mb=-8192
+
+ # so, host1 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, -512)
+ self.assertEqual(weighed_host.obj.host, 'host1')
+
+ def test_ram_filter_multiplier2(self):
+ self.flags(ram_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512 * 2
+ # host2: free_ram_mb=1024 * 2
+ # host3: free_ram_mb=3072 * 2
+ # host4: free_ram_mb=8192 * 2
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192 * 2)
+ self.assertEqual(weighed_host.obj.host, 'host4')
diff --git a/nova/tests/servicegroup/__init__.py b/nova/tests/servicegroup/__init__.py
new file mode 100644
index 000000000..4549abf92
--- /dev/null
+++ b/nova/tests/servicegroup/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/servicegroup/test_db_servicegroup.py b/nova/tests/servicegroup/test_db_servicegroup.py
new file mode 100644
index 000000000..d14427439
--- /dev/null
+++ b/nova/tests/servicegroup/test_db_servicegroup.py
@@ -0,0 +1,143 @@
+# Copyright (c) IBM 2012 Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import eventlet
+import fixtures
+
+from nova import context
+from nova import db
+from nova.openstack.common import timeutils
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class DBServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(DBServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='db')
+ self.down_time = 3
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API()
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_DB_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ eventlet.sleep(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ serv.stop()
+ eventlet.sleep(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ service_ref1 = db.service_get_by_args(self._ctx,
+ host1,
+ self._binary)
+ service_ref2 = db.service_get_by_args(self._ctx,
+ host2,
+ self._binary)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertTrue(service_ref1['host'] in services)
+ self.assertTrue(service_ref2['host'] in services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertTrue(service_id in services)
+
+ def test_service_is_up(self):
+ fts_func = datetime.datetime.fromtimestamp
+ fake_now = 1000
+ down_time = 5
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow')
+ self.servicegroup_api = servicegroup.API()
+
+ # Up (equal)
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time),
+ 'created_at': fts_func(fake_now - self.down_time)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time + 1),
+ 'created_at': fts_func(fake_now - self.down_time + 1)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time - 3),
+ 'created_at': fts_func(fake_now - self.down_time - 3)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertFalse(result)
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 6e0a97c0c..829a98334 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the API endpoint"""
+"""Unit tests for the API endpoint."""
import random
import StringIO
@@ -29,6 +29,7 @@ try:
from boto.connection import HTTPResponse
except ImportError:
from httplib import HTTPResponse
+import fixtures
import webob
from nova.api import auth
@@ -38,22 +39,19 @@ from nova.api.ec2 import ec2utils
from nova import block_device
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
-
-
-FLAGS = flags.FLAGS
+from nova.tests import matchers
class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
+ """Returns the socket's internal buffer."""
return self._buffer
@@ -93,12 +91,12 @@ class FakeHttplibConnection(object):
return self.sock.response_string
def close(self):
- """Required for compatibility with boto/tornado"""
+ """Required for compatibility with boto/tornado."""
pass
class XmlConversionTestCase(test.TestCase):
- """Unit test api xml conversion"""
+ """Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
@@ -163,7 +161,7 @@ class Ec2utilsTestCase(test.TestCase):
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
- self.assertDictMatch(out_dict, expected_dict)
+ self.assertThat(out_dict, matchers.DictMatches(expected_dict))
def test_properties_root_defice_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
@@ -209,12 +207,12 @@ class Ec2utilsTestCase(test.TestCase):
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
- self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
- expected_result)
+ self.assertThat(block_device.mappings_prepend_dev(mappings),
+ matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
- """Unit test for the cloud controller on an EC2 API"""
+ """Unit test for the cloud controller on an EC2 API."""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
@@ -224,9 +222,10 @@ class ApiEc2TestCase(test.TestCase):
self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
), 'nova.api.ec2.cloud.CloudController'))))
+ self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
- """Returns a new EC2 connection"""
+ """Returns a new EC2 connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -282,7 +281,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
- """Attempt to terminate an invalid instance"""
+ # Attempt to terminate an invalid instance.
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
@@ -319,7 +318,7 @@ class ApiEc2TestCase(test.TestCase):
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
- """Test that we can retrieve security groups"""
+ # Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
@@ -329,7 +328,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
- """Test that we can create a security group"""
+ # Test that we can create a security group.
self.expect_http()
self.mox.ReplayAll()
@@ -351,7 +350,7 @@ class ApiEc2TestCase(test.TestCase):
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
- """ Test that we sanely handle invalid security group names.
+ """Test that we sanely handle invalid security group names.
EC2 API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. Amazon implementation
accepts more characters - so, [:print:] is ok. """
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
index 381ed8070..4d62d6bbf 100644
--- a/nova/tests/test_bdm.py
+++ b/nova/tests/test_bdm.py
@@ -22,32 +22,39 @@ Tests for Block Device Mapping Code.
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova import test
+from nova.tests import matchers
class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
- """Test Case for Block Device Mapping"""
+ """Test Case for Block Device Mapping."""
def fake_ec2_vol_id_to_uuid(obj, ec2_id):
+ if ec2_id == 'vol-87654321':
+ return '22222222-3333-4444-5555-666666666666'
+ elif ec2_id == 'vol-98765432':
+ return '77777777-8888-9999-0000-aaaaaaaaaaaa'
+ else:
+ return 'OhNoooo'
+
+ def fake_ec2_snap_id_to_uuid(obj, ec2_id):
if ec2_id == 'snap-12345678':
return '00000000-1111-2222-3333-444444444444'
elif ec2_id == 'snap-23456789':
return '11111111-2222-3333-4444-555555555555'
- elif ec2_id == 'vol-87654321':
- return '22222222-3333-4444-5555-666666666666'
- elif ec2_id == 'vol-98765432':
- return '77777777-8888-9999-0000-aaaaaaaaaaaa'
else:
return 'OhNoooo'
def _assertApply(self, action, bdm_list):
for bdm, expected_result in bdm_list:
- self.assertDictMatch(action(bdm), expected_result)
+ self.assertThat(action(bdm), matchers.DictMatches(expected_result))
def test_parse_block_device_mapping(self):
self.stubs.Set(ec2utils,
'ec2_vol_id_to_uuid',
self.fake_ec2_vol_id_to_uuid)
-
+ self.stubs.Set(ec2utils,
+ 'ec2_snap_id_to_uuid',
+ self.fake_ec2_snap_id_to_uuid)
bdm_list = [
({'device_name': '/dev/fake0',
'ebs': {'snapshot_id': 'snap-12345678',
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 3302aedb8..29e2e978b 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -17,7 +17,9 @@
import httplib2
import urlparse
+from cinderclient import exceptions as cinder_exception
from nova import context
+from nova import exception
from nova.volume import cinder
from nova import test
@@ -40,6 +42,11 @@ def _stub_volume(**kwargs):
volume.update(kwargs)
return volume
+_image_metadata = {
+ 'kernel_id': 'fake',
+ 'ramdisk_id': 'fake'
+}
+
class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
@@ -77,14 +84,27 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
volume = {'volume': _stub_volume(id='1234')}
return (200, volume)
+ def get_volumes_nonexisting(self, **kw):
+ raise cinder_exception.NotFound(code=404, message='Resource not found')
+
+ def get_volumes_5678(self, **kw):
+ """Volume with image metadata."""
+ volume = {'volume': _stub_volume(id='1234',
+ volume_image_metadata=_image_metadata)
+ }
+ return (200, volume)
+
class FakeCinderClient(cinder.cinder_client.Client):
- def __init__(self, username, password, project_id=None, auth_url=None):
+ def __init__(self, username, password, project_id=None, auth_url=None,
+ retries=None):
super(FakeCinderClient, self).__init__(username, password,
project_id=project_id,
- auth_url=auth_url)
- self.client = FakeHTTPClient(username, password, project_id, auth_url)
+ auth_url=auth_url,
+ retries=retries)
+ self.client = FakeHTTPClient(username, password, project_id, auth_url,
+ retries=retries)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
@@ -146,3 +166,21 @@ class CinderTestCase(test.TestCase):
self.assertEquals(
self.fake_client_factory.client.client.management_url,
'http://other_host:8776/v1/project_id')
+
+ def test_get_non_existing_volume(self):
+ self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
+ 'nonexisting')
+
+ def test_volume_with_image_metadata(self):
+ volume = self.api.get(self.context, '5678')
+ self.assert_called('GET', '/volumes/5678')
+ self.assertTrue('volume_image_metadata' in volume)
+ self.assertEqual(volume['volume_image_metadata'], _image_metadata)
+
+ def test_cinder_http_retries(self):
+ retries = 42
+ self.flags(cinder_http_retries=retries)
+ volume = self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEquals(
+ self.fake_client_factory.client.client.retries, retries)
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index 922dc3613..28fa423e0 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -22,14 +22,10 @@ import tempfile
from nova import test
-from nova import flags
from nova.openstack.common import log
from nova import utils
from nova.virt import configdrive
-
-FLAGS = flags.FLAGS
-
LOG = log.getLogger(__name__)
@@ -49,12 +45,11 @@ class ConfigDriveTestCase(test.TestCase):
self.mox.ReplayAll()
- c = configdrive.ConfigDriveBuilder()
- c._add_file('this/is/a/path/hello', 'This is some content')
- (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
- os.close(fd)
- c._make_iso9660(imagefile)
- c.cleanup()
+ with configdrive.ConfigDriveBuilder() as c:
+ c._add_file('this/is/a/path/hello', 'This is some content')
+ (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
+ os.close(fd)
+ c._make_iso9660(imagefile)
# Check cleanup
self.assertFalse(os.path.exists(c.tempdir))
@@ -72,22 +67,19 @@ class ConfigDriveTestCase(test.TestCase):
utils.mkfs('vfat', mox.IgnoreArg(),
label='config-2').AndReturn(None)
- utils.trycmd('mount', '-o', 'loop', mox.IgnoreArg(),
+ utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(),
run_as_root=True).AndReturn((None, None))
- utils.trycmd('chown', mox.IgnoreArg(), mox.IgnoreArg(),
- run_as_root=True).AndReturn((None, None))
utils.execute('umount', mox.IgnoreArg(),
run_as_root=True).AndReturn(None)
self.mox.ReplayAll()
- c = configdrive.ConfigDriveBuilder()
- c._add_file('this/is/a/path/hello', 'This is some content')
- (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
- os.close(fd)
- c._make_vfat(imagefile)
- c.cleanup()
+ with configdrive.ConfigDriveBuilder() as c:
+ c._add_file('this/is/a/path/hello', 'This is some content')
+ (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
+ os.close(fd)
+ c._make_vfat(imagefile)
# Check cleanup
self.assertFalse(os.path.exists(c.tempdir))
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index c725079d2..83010cee2 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -23,12 +23,9 @@ import mox
from nova import crypto
from nova import db
from nova import exception
-from nova import flags
from nova import test
from nova import utils
-FLAGS = flags.FLAGS
-
class X509Test(test.TestCase):
def test_can_generate_x509(self):
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 76bd1f031..c70e96cdc 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -17,19 +17,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the DB API"""
+"""Unit tests for the DB API."""
import datetime
+import uuid as stdlib_uuid
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+
+CONF = cfg.CONF
+CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
+CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
class DbApiTestCase(test.TestCase):
@@ -41,9 +45,45 @@ class DbApiTestCase(test.TestCase):
def create_instances_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
- 'project_id': self.project_id}
+ 'project_id': self.project_id, 'vm_state': 'fake'}
+ if 'context' in kwargs:
+ ctxt = kwargs.pop('context')
+ args['project_id'] = ctxt.project_id
+ else:
+ ctxt = self.context
args.update(kwargs)
- return db.instance_create(self.context, args)
+ return db.instance_create(ctxt, args)
+
+ def test_create_instance_unique_hostname(self):
+ otherprojectcontext = context.RequestContext(self.user_id,
+ "%s2" % self.project_id)
+
+ self.create_instances_with_args(hostname='fake_name')
+
+ # With scope 'global' any duplicate should fail, be it this project:
+ self.flags(osapi_compute_unique_server_name_scope='global')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instances_with_args,
+ hostname='fake_name')
+
+ # or another:
+ self.assertRaises(exception.InstanceExists,
+ self.create_instances_with_args,
+ context=otherprojectcontext,
+ hostname='fake_name')
+
+ # With scope 'project' a duplicate in the project should fail:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instances_with_args,
+ hostname='fake_name')
+
+ # With scope 'project' a duplicate in a different project should work:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.create_instances_with_args(context=otherprojectcontext,
+ hostname='fake_name')
+
+ self.flags(osapi_compute_unique_server_name_scope=None)
def test_ec2_ids_not_found_are_printable(self):
def check_exc_format(method):
@@ -74,7 +114,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_regex_unsupported_db(self):
- """Ensure that the 'LIKE' operator is used for unsupported dbs."""
+ # Ensure that the 'LIKE' operator is used for unsupported dbs.
self.flags(sql_connection="notdb://")
self.create_instances_with_args(display_name='test1')
self.create_instances_with_args(display_name='test.*')
@@ -105,12 +145,12 @@ class DbApiTestCase(test.TestCase):
db.instance_destroy(self.context, inst1['uuid'])
result = db.instance_get_all_by_filters(self.context, {})
self.assertEqual(2, len(result))
- self.assertIn(inst1.id, [result[0].id, result[1].id])
- self.assertIn(inst2.id, [result[0].id, result[1].id])
- if inst1.id == result[0].id:
- self.assertTrue(result[0].deleted)
+ self.assertIn(inst1['id'], [result[0]['id'], result[1]['id']])
+ self.assertIn(inst2['id'], [result[0]['id'], result[1]['id']])
+ if inst1['id'] == result[0]['id']:
+ self.assertTrue(result[0]['deleted'])
else:
- self.assertTrue(result[1].deleted)
+ self.assertTrue(result[1]['deleted'])
def test_instance_get_all_by_filters_paginate(self):
self.flags(sql_connection="notdb://")
@@ -141,7 +181,7 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
- marker=str(utils.gen_uuid()))
+ marker=str(stdlib_uuid.uuid4()))
def test_migration_get_unconfirmed_by_dest_compute(self):
ctxt = context.get_admin_context()
@@ -170,7 +210,7 @@ class DbApiTestCase(test.TestCase):
results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
- db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
+ db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
@@ -180,7 +220,7 @@ class DbApiTestCase(test.TestCase):
results = db.migration_get_unconfirmed_by_dest_compute(ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
- db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
+ db.migration_update(ctxt, migration['id'], {"status": "CONFIRMED"})
def test_instance_get_all_hung_in_rebooting(self):
ctxt = context.get_admin_context()
@@ -206,20 +246,35 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(0, len(results))
db.instance_update(ctxt, instance['uuid'], {"task_state": None})
+ def test_multi_associate_disassociate(self):
+ ctxt = context.get_admin_context()
+ values = {'address': 'floating'}
+ floating = db.floating_ip_create(ctxt, values)
+ values = {'address': 'fixed'}
+ fixed = db.fixed_ip_create(ctxt, values)
+ res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
+ self.assertEqual(res, fixed)
+ res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
+ self.assertEqual(res, None)
+ res = db.floating_ip_disassociate(ctxt, floating)
+ self.assertEqual(res, fixed)
+ res = db.floating_ip_disassociate(ctxt, floating)
+ self.assertEqual(res, None)
+
def test_network_create_safe(self):
ctxt = context.get_admin_context()
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(ctxt, values)
- self.assertNotEqual(None, network.uuid)
- self.assertEqual(36, len(network.uuid))
- db_network = db.network_get(ctxt, network.id)
- self.assertEqual(network.uuid, db_network.uuid)
+ self.assertNotEqual(None, network['uuid'])
+ self.assertEqual(36, len(network['uuid']))
+ db_network = db.network_get(ctxt, network['id'])
+ self.assertEqual(network['uuid'], db_network['uuid'])
def test_network_delete_safe(self):
ctxt = context.get_admin_context()
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(ctxt, values)
- db_network = db.network_get(ctxt, network.id)
+ db_network = db.network_get(ctxt, network['id'])
values = {'network_id': network['id'], 'address': 'fake1'}
address1 = db.fixed_ip_create(ctxt, values)
values = {'network_id': network['id'],
@@ -245,28 +300,100 @@ class DbApiTestCase(test.TestCase):
db.network_create_safe, ctxt, values2)
def test_instance_update_with_instance_uuid(self):
- """ test instance_update() works when an instance UUID is passed """
+ # test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
- values = {'metadata': {'host': 'foo'},
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
- values = {'metadata': {'host': 'bar'},
+ values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
- instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
+ self.assertEqual('wuff', instance_meta['key2'])
+ self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
- system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
+ system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
+ def test_instance_update_of_instance_type_id(self):
+ ctxt = context.get_admin_context()
+
+ inst_type1 = db.instance_type_get_by_name(ctxt, 'm1.tiny')
+ inst_type2 = db.instance_type_get_by_name(ctxt, 'm1.small')
+
+ values = {'instance_type_id': inst_type1['id']}
+ instance = db.instance_create(ctxt, values)
+
+ self.assertEqual(instance['instance_type']['id'], inst_type1['id'])
+ self.assertEqual(instance['instance_type']['name'],
+ inst_type1['name'])
+
+ values = {'instance_type_id': inst_type2['id']}
+ instance = db.instance_update(ctxt, instance['uuid'], values)
+
+ self.assertEqual(instance['instance_type']['id'], inst_type2['id'])
+ self.assertEqual(instance['instance_type']['name'],
+ inst_type2['name'])
+
+ def test_instance_update_unique_name(self):
+ otherprojectcontext = context.RequestContext(self.user_id,
+ "%s2" % self.project_id)
+
+ inst = self.create_instances_with_args(hostname='fake_name')
+ uuid1p1 = inst['uuid']
+ inst = self.create_instances_with_args(hostname='fake_name2')
+ uuid2p1 = inst['uuid']
+
+ inst = self.create_instances_with_args(context=otherprojectcontext,
+ hostname='fake_name3')
+ uuid1p2 = inst['uuid']
+
+ # osapi_compute_unique_server_name_scope is unset so this should work:
+ values = {'hostname': 'fake_name2'}
+ db.instance_update(self.context, uuid1p1, values)
+ values = {'hostname': 'fake_name'}
+ db.instance_update(self.context, uuid1p1, values)
+
+ # With scope 'global' any duplicate should fail.
+ self.flags(osapi_compute_unique_server_name_scope='global')
+ self.assertRaises(exception.InstanceExists,
+ db.instance_update,
+ self.context,
+ uuid2p1,
+ values)
+
+ self.assertRaises(exception.InstanceExists,
+ db.instance_update,
+ otherprojectcontext,
+ uuid1p2,
+ values)
+
+ # But we should definitely be able to update our name if we aren't
+ # really changing it.
+ case_only_values = {'hostname': 'fake_NAME'}
+ db.instance_update(self.context, uuid1p1, case_only_values)
+
+ # With scope 'project' a duplicate in the project should fail:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.assertRaises(exception.InstanceExists,
+ db.instance_update,
+ self.context,
+ uuid2p1,
+ values)
+
+ # With scope 'project' a duplicate in a different project should work:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ db.instance_update(otherprojectcontext, uuid1p2, values)
+
def test_instance_update_with_and_get_original(self):
ctxt = context.get_admin_context()
@@ -279,10 +406,45 @@ class DbApiTestCase(test.TestCase):
self.assertEquals("building", old_ref["vm_state"])
self.assertEquals("needscoffee", new_ref["vm_state"])
+ def test_instance_update_with_extra_specs(self):
+ # Ensure _extra_specs are returned from _instance_update.
+ ctxt = context.get_admin_context()
+
+ # create a flavor
+ inst_type_dict = dict(
+ name="test_flavor",
+ memory_mb=1,
+ vcpus=1,
+ root_gb=1,
+ ephemeral_gb=1,
+ flavorid=105)
+ inst_type_ref = db.instance_type_create(ctxt, inst_type_dict)
+
+ # add some extra spec to our flavor
+ spec = {'test_spec': 'foo'}
+ db.instance_type_extra_specs_update_or_create(
+ ctxt,
+ inst_type_ref['flavorid'],
+ spec)
+
+ # create instance, just populates db, doesn't pull extra_spec
+ instance = db.instance_create(
+ ctxt,
+ {'instance_type_id': inst_type_ref['id']})
+ self.assertNotIn('extra_specs', instance)
+
+ # update instance, used when starting instance to set state, etc
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ ctxt,
+ instance['uuid'],
+ {})
+ self.assertEquals(spec, old_ref['extra_specs'])
+ self.assertEquals(spec, new_ref['extra_specs'])
+
def test_instance_fault_create(self):
- """Ensure we can create an instance fault"""
+ # Ensure we can create an instance fault.
ctxt = context.get_admin_context()
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
# Create a fault
fault_values = {
@@ -298,7 +460,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(404, faults[uuid][0]['code'])
def test_instance_fault_get_by_instance(self):
- """ ensure we can retrieve an instance fault by instance UUID """
+ # ensure we can retrieve an instance fault by instance UUID.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -347,7 +509,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(instance_faults, expected)
def test_instance_faults_get_by_instance_uuids_no_faults(self):
- """None should be returned when no faults exist"""
+ # None should be returned when no faults exist.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -356,6 +518,225 @@ class DbApiTestCase(test.TestCase):
expected = {uuids[0]: [], uuids[1]: []}
self.assertEqual(expected, instance_faults)
+ def test_instance_action_start(self):
+ """Create an instance action."""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_action_finish(self):
+ """Create an instance action."""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_start_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_start_values)
+
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ action_finish_values = {'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'finish_time': finish_time}
+ db.action_finish(ctxt, action_finish_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(finish_time, actions[0]['finish_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_actions_get_by_instance(self):
+ """Ensure we can get actions by UUID."""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt1, uuid1)
+ self.assertEqual(2, len(actions))
+ self.assertEqual('resize', actions[0]['action'])
+ self.assertEqual('run_instance', actions[1]['action'])
+
+ def test_instance_action_get_by_instance_and_action(self):
+ """Ensure we can get an action by instance UUID and action id."""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ actions = db.actions_get(ctxt1, uuid1)
+ action_id = actions[0]['id']
+ action = db.action_get_by_id(ctxt1, uuid1, action_id)
+ self.assertEqual('resize', action['action'])
+ self.assertEqual(ctxt1.request_id, action['request_id'])
+
+ def test_instance_action_event_start(self):
+ """Create an instance action event."""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+
+ def test_instance_action_event_finish(self):
+ """Finish an instance action event."""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ event_finish_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'finish_time': finish_time}
+ db.action_event_finish(ctxt, event_finish_values)
+
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+ self.assertEqual(finish_time, events[0]['finish_time'])
+
+ def test_instance_action_event_get_by_id(self):
+ """Get a specific instance action event."""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ added_action = db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+
+ start_time = timeutils.utcnow()
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'start_time': start_time}
+ added_event = db.action_event_start(ctxt1, event_values)
+
+ event_values = {'event': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_event_start(ctxt2, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ event = db.action_event_get_by_id(ctxt1, added_action['id'],
+ added_event['id'])
+ self.assertEqual('schedule', event['event'])
+ self.assertEqual(start_time, event['start_time'])
+
def test_dns_registration(self):
domain1 = 'test.domain.one'
domain2 = 'test.domain.two'
@@ -364,19 +745,23 @@ class DbApiTestCase(test.TestCase):
db.dnsdomain_register_for_zone(ctxt, domain1, testzone)
domain_ref = db.dnsdomain_get(ctxt, domain1)
- zone = domain_ref.availability_zone
- scope = domain_ref.scope
+ zone = domain_ref['availability_zone']
+ scope = domain_ref['scope']
self.assertEqual(scope, 'private')
self.assertEqual(zone, testzone)
db.dnsdomain_register_for_project(ctxt, domain2,
self.project_id)
domain_ref = db.dnsdomain_get(ctxt, domain2)
- project = domain_ref.project_id
- scope = domain_ref.scope
+ project = domain_ref['project_id']
+ scope = domain_ref['scope']
self.assertEqual(project, self.project_id)
self.assertEqual(scope, 'public')
+ expected = [domain1, domain2]
+ domains = db.dnsdomain_list(ctxt)
+ self.assertEqual(expected, domains)
+
db.dnsdomain_unregister(ctxt, domain1)
db.dnsdomain_unregister(ctxt, domain2)
@@ -406,6 +791,50 @@ class DbApiTestCase(test.TestCase):
data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing')
self.assertEqual(len(data), 0)
+ def test_network_get_all_by_host(self):
+ ctxt = context.get_admin_context()
+ data = db.network_get_all_by_host(ctxt, 'foo')
+ self.assertEqual(len(data), 0)
+ # dummy network
+ net = db.network_create_safe(ctxt, {})
+ # network with host set
+ net = db.network_create_safe(ctxt, {'host': 'foo'})
+ data = db.network_get_all_by_host(ctxt, 'foo')
+ self.assertEqual(len(data), 1)
+ # network with fixed ip with host set
+ net = db.network_create_safe(ctxt, {})
+ values = {'host': 'foo', 'network_id': net['id']}
+ fixed_address = db.fixed_ip_create(ctxt, values)
+ data = db.network_get_all_by_host(ctxt, 'foo')
+ self.assertEqual(len(data), 2)
+ # network with instance with host set
+ net = db.network_create_safe(ctxt, {})
+ instance = db.instance_create(ctxt, {'host': 'foo'})
+ values = {'instance_uuid': instance['uuid']}
+ vif = db.virtual_interface_create(ctxt, values)
+ values = {'network_id': net['id'],
+ 'virtual_interface_id': vif['id']}
+ fixed_address = db.fixed_ip_create(ctxt, values)
+ data = db.network_get_all_by_host(ctxt, 'foo')
+ self.assertEqual(len(data), 3)
+
+ def test_network_in_use_on_host(self):
+ ctxt = context.get_admin_context()
+
+ values = {'host': 'foo', 'hostname': 'myname'}
+ instance = db.instance_create(ctxt, values)
+ values = {'address': 'bar', 'instance_uuid': instance['uuid']}
+ vif = db.virtual_interface_create(ctxt, values)
+ values = {'address': 'baz',
+ 'network_id': 1,
+ 'allocated': True,
+ 'instance_uuid': instance['uuid'],
+ 'virtual_interface_id': vif['id']}
+ db.fixed_ip_create(ctxt, values)
+
+ self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'foo'), True)
+ self.assertEqual(db.network_in_use_on_host(ctxt, 1, 'bar'), False)
+
def _timeout_test(self, ctxt, timeout, multi_host):
values = {'host': 'foo'}
instance = db.instance_create(ctxt, values)
@@ -534,13 +963,13 @@ class DbApiTestCase(test.TestCase):
def _get_fake_aggr_values():
- return {'name': 'fake_aggregate',
- 'availability_zone': 'fake_avail_zone', }
+ return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
- 'fake_key2': 'fake_value2'}
+ 'fake_key2': 'fake_value2',
+ 'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
@@ -560,7 +989,7 @@ def _create_aggregate_with_hosts(context=context.get_admin_context(),
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
- db.aggregate_host_add(context, result.id, host)
+ db.aggregate_host_add(context, result['id'], host)
return result
@@ -571,28 +1000,26 @@ class AggregateDBApiTestCase(test.TestCase):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- def test_aggregate_create(self):
- """Ensure aggregate can be created with no metadata."""
+ def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
- self.assertEquals(result.name, 'fake_aggregate')
+ self.assertEquals(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
- """Test we can avoid conflict on deleted aggregates."""
r1 = _create_aggregate(metadata=None)
- db.aggregate_delete(context.get_admin_context(), r1.id)
- values = {'name': r1.name, 'availability_zone': 'new_zone'}
- r2 = _create_aggregate(values=values)
- self.assertEqual(r2.name, values['name'])
- self.assertEqual(r2.availability_zone, values['availability_zone'])
+ db.aggregate_delete(context.get_admin_context(), r1['id'])
+ values = {'name': r1['name']}
+ metadata = {'availability_zone': 'new_zone'}
+ r2 = _create_aggregate(values=values, metadata=metadata)
+ self.assertEqual(r2['name'], values['name'])
+ self.assertEqual(r2['availability_zone'],
+ metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
- """Ensure aggregate names are distinct."""
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -601,7 +1028,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting metadata."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -610,66 +1036,60 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
- """Ensure aggregate can be created with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
- """Ensure aggregate metadata is deleted bug 1052479."""
+ #test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
- result = _create_aggregate(metadata=None)
+ result = _create_aggregate(metadata={'availability_zone':
+ 'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertEqual(expected_metadata, {})
+ self.assertEqual(expected_metadata, {'availability_zone':
+ 'fake_avail_zone'})
def test_aggregate_create_low_privi_context(self):
- """Ensure right context is applied when creating aggregate."""
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
- """Ensure we can get aggregate with all its relations."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
- expected = db.aggregate_get(ctxt, result.id)
- self.assertEqual(_get_fake_aggr_hosts(), expected.hosts)
- self.assertEqual(_get_fake_aggr_metadata(), expected.metadetails)
+ expected = db.aggregate_get(ctxt, result['id'])
+ self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
+ self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
- """Ensure we can get aggregates by host."""
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
- self.assertEqual([a1.id, a2.id], [x.id for x in r1])
+ self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
- """Ensure we can get aggregates by host."""
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
- self.assertEqual([a1.id], [x.id for x in r1])
+ self.assertEqual([a1['id']], [x['id'] for x in r1])
- def test_aggregate_metdata_get_by_host(self):
- """Ensure we can get aggregates by host."""
+ def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
- values2 = {'name': 'fake_aggregate3',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
@@ -678,13 +1098,10 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertFalse('badkey' in r1)
- def test_aggregate_metdata_get_by_host_with_key(self):
- """Ensure we can get aggregates by host."""
+ def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone', }
- values2 = {'name': 'fake_aggregate3',
- 'availability_zone': 'fake_avail_zone', }
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
@@ -694,19 +1111,29 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(r1['good'], set(['value']))
self.assertFalse('fake_key1' in r1)
# Delete metadata
- db.aggregate_metadata_delete(ctxt, a3.id, 'good')
+ db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
key='good')
self.assertFalse('good' in r2)
+ def test_aggregate_host_get_by_metadata_key(self):
+ ctxt = context.get_admin_context()
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
+ a1 = _create_aggregate_with_hosts(context=ctxt)
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
+ a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=['foo.openstack.org'], metadata={'good': 'value'})
+ r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
+ self.assertEqual(r1, {'foo.openstack.org': set(['value'])})
+ self.assertFalse('fake_key1' in r1)
+
def test_aggregate_get_by_host_not_found(self):
- """Ensure AggregateHostNotFound is raised with unknown host."""
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
- """Ensure AggregateNotFound is raised when deleting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -715,7 +1142,6 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id)
def test_aggregate_delete(self):
- """Ensure we can delete an aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
@@ -723,41 +1149,44 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
- self.assertEqual(aggregate.deleted, True)
+ self.assertEqual(aggregate['deleted'], True)
def test_aggregate_update(self):
- """Ensure an aggregate can be updated."""
ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ self.assertEqual(result.availability_zone, 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, 1, new_values)
- self.assertNotEqual(result.availability_zone,
- updated.availability_zone)
+ self.assertNotEqual(result['availability_zone'],
+ updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
- """Ensure an aggregate can be updated with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
+ values['availability_zone'] = 'different_avail_zone'
db.aggregate_update(ctxt, 1, values)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(_get_fake_aggr_metadata(), expected)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ updated = db.aggregate_get(ctxt, result['id'])
+ self.assertThat(values['metadata'],
+ matchers.DictMatches(expected))
+ self.assertNotEqual(result.availability_zone,
+ updated.availability_zone)
def test_aggregate_update_with_existing_metadata(self):
- """Ensure an aggregate can be updated with existing metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, 1, values)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(values['metadata'], expected)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(values['metadata'], matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
- """Ensure AggregateNotFound is raised when updating an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -766,113 +1195,109 @@ class AggregateDBApiTestCase(test.TestCase):
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
- """Ensure we can get all aggregates."""
ctxt = context.get_admin_context()
counter = 3
for c in xrange(counter):
_create_aggregate(context=ctxt,
- values={'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'},
+ values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
- """Ensure we get only non-deleted aggregates."""
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in xrange(1, add_counter):
- values = {'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'}
+ values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in xrange(1, remove_counter):
- db.aggregate_delete(ctxt, aggregates[c - 1].id)
+ db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
- """Ensure we can add metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result.id, metadata)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(metadata, expected)
+ db.aggregate_metadata_add(ctxt, result['id'], metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_update(self):
- """Ensure we can update metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
- db.aggregate_metadata_delete(ctxt, result.id, key)
+ db.aggregate_metadata_delete(ctxt, result['id'], key)
new_metadata = {key: 'foo'}
- db.aggregate_metadata_add(ctxt, result.id, new_metadata)
- expected = db.aggregate_metadata_get(ctxt, result.id)
+ db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
metadata[key] = 'foo'
- self.assertDictMatch(metadata, expected)
+ self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
- """Ensure we can delete metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result.id, metadata)
- db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
- expected = db.aggregate_metadata_get(ctxt, result.id)
+ db.aggregate_metadata_add(ctxt, result['id'], metadata)
+ db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
del metadata[metadata.keys()[0]]
- self.assertDictMatch(metadata, expected)
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_remove_availability_zone(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ db.aggregate_metadata_delete(ctxt, result.id, 'availability_zone')
+ expected = db.aggregate_metadata_get(ctxt, result.id)
+ aggregate = db.aggregate_get(ctxt, result.id)
+ self.assertEquals(aggregate.availability_zone, None)
+ self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
- """Ensure AggregateMetadataNotFound is raised when deleting."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
- ctxt, result.id, 'foo_key')
+ ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
- """Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- expected = db.aggregate_host_get_all(ctxt, result.id)
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
- def test_aggregate_host_add_deleted(self):
- """Ensure we can add a host that was previously deleted."""
+ def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
- db.aggregate_host_delete(ctxt, result.id, host)
- db.aggregate_host_add(ctxt, result.id, host)
- expected = db.aggregate_host_get_all(ctxt, result.id)
+ db.aggregate_host_delete(ctxt, result['id'], host)
+ db.aggregate_host_add(ctxt, result['id'], host)
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
- """Ensure we can add host to distinct aggregates."""
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
- values={'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone2', },
- metadata=None)
- h1 = db.aggregate_host_get_all(ctxt, r1.id)
- h2 = db.aggregate_host_get_all(ctxt, r2.id)
+ values={'name': 'fake_aggregate2'},
+ metadata={'availability_zone': 'fake_avail_zone2'})
+ h1 = db.aggregate_host_get_all(ctxt, r1['id'])
+ h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
- """Ensure we cannot add host to the same aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
- ctxt, result.id, _get_fake_aggr_hosts()[0])
+ ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
- """Ensure AggregateFound when adding a host."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
@@ -882,21 +1307,19 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
- """Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- db.aggregate_host_delete(ctxt, result.id,
+ db.aggregate_host_delete(ctxt, result['id'],
_get_fake_aggr_hosts()[0])
- expected = db.aggregate_host_get_all(ctxt, result.id)
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
- """Ensure AggregateHostNotFound is raised when deleting a host."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
- ctxt, result.id, _get_fake_aggr_hosts()[0])
+ ctxt, result['id'], _get_fake_aggr_hosts()[0])
class CapacityTestCase(test.TestCase):
@@ -916,7 +1339,7 @@ class CapacityTestCase(test.TestCase):
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
- service_id=self.service.id)
+ service_id=self.service['id'])
# add some random stats
stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
@@ -938,10 +1361,10 @@ class CapacityTestCase(test.TestCase):
def test_compute_node_create(self):
item = self._create_helper('host1')
- self.assertEquals(item.free_ram_mb, 1024)
- self.assertEquals(item.free_disk_gb, 2048)
- self.assertEquals(item.running_vms, 0)
- self.assertEquals(item.current_workload, 0)
+ self.assertEquals(item['free_ram_mb'], 1024)
+ self.assertEquals(item['free_disk_gb'], 2048)
+ self.assertEquals(item['running_vms'], 0)
+ self.assertEquals(item['current_workload'], 0)
stats = self._stats_as_dict(item['stats'])
self.assertEqual(3, stats['num_instances'])
@@ -1002,6 +1425,65 @@ class CapacityTestCase(test.TestCase):
self.assertEqual(1, int(stat['value']))
+class MigrationTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MigrationTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ self._create()
+ self._create()
+ self._create(status='reverted')
+ self._create(status='confirmed')
+ self._create(source_compute='host2', source_node='b',
+ dest_compute='host1', dest_node='a')
+ self._create(source_compute='host2', dest_compute='host3')
+ self._create(source_compute='host3', dest_compute='host4')
+
+ def _create(self, status='migrating', source_compute='host1',
+ source_node='a', dest_compute='host2', dest_node='b'):
+
+ values = {'host': source_compute}
+ instance = db.instance_create(self.ctxt, values)
+
+ values = {'status': status, 'source_compute': source_compute,
+ 'source_node': source_node, 'dest_compute': dest_compute,
+ 'dest_node': dest_node, 'instance_uuid': instance['uuid']}
+ db.migration_create(self.ctxt, values)
+
+ def _assert_in_progress(self, migrations):
+ for migration in migrations:
+ self.assertNotEqual('confirmed', migration['status'])
+ self.assertNotEqual('reverted', migration['status'])
+
+ def test_in_progress_host1_nodea(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host1', 'a')
+ # 2 as source + 1 as dest
+ self.assertEqual(3, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_in_progress_host1_nodeb(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host1', 'b')
+ # some migrations are to/from host1, but none with a node 'b'
+ self.assertEqual(0, len(migrations))
+
+ def test_in_progress_host2_nodeb(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host2', 'b')
+ # 2 as dest, 1 as source
+ self.assertEqual(3, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_instance_join(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host2', 'b')
+ for migration in migrations:
+ instance = migration['instance']
+ self.assertEqual(migration['instance_uuid'], instance['uuid'])
+
+
class TestIpAllocation(test.TestCase):
def setUp(self):
@@ -1018,28 +1500,28 @@ class TestIpAllocation(test.TestCase):
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
- self.ctxt, None, self.instance.uuid)
+ self.ctxt, None, self.instance['uuid'])
def test_fixed_ip_associate_fails_if_ip_in_use(self):
- address = self.create_fixed_ip(instance_uuid=self.instance.uuid)
+ address = self.create_fixed_ip(instance_uuid=self.instance['uuid'])
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
- self.ctxt, address, self.instance.uuid)
+ self.ctxt, address, self.instance['uuid'])
def test_fixed_ip_associate_succeeds(self):
- address = self.create_fixed_ip(network_id=self.network.id)
- db.fixed_ip_associate(self.ctxt, address, self.instance.uuid,
- network_id=self.network.id)
+ address = self.create_fixed_ip(network_id=self.network['id'])
+ db.fixed_ip_associate(self.ctxt, address, self.instance['uuid'],
+ network_id=self.network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
- self.assertEqual(fixed_ip.instance_uuid, self.instance.uuid)
+ self.assertEqual(fixed_ip['instance_uuid'], self.instance['uuid'])
def test_fixed_ip_associate_succeeds_and_sets_network(self):
address = self.create_fixed_ip()
- db.fixed_ip_associate(self.ctxt, address, self.instance.uuid,
- network_id=self.network.id)
+ db.fixed_ip_associate(self.ctxt, address, self.instance['uuid'],
+ network_id=self.network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
- self.assertEqual(fixed_ip.instance_uuid, self.instance.uuid)
- self.assertEqual(fixed_ip.network_id, self.network.id)
+ self.assertEqual(fixed_ip['instance_uuid'], self.instance['uuid'])
+ self.assertEqual(fixed_ip['network_id'], self.network['id'])
class InstanceDestroyConstraints(test.TestCase):
@@ -1079,185 +1561,87 @@ class InstanceDestroyConstraints(test.TestCase):
self.assertFalse(instance['deleted'])
-def _get_sm_backend_params():
- config_params = ("name_label=testsmbackend "
- "server=localhost "
- "serverpath=/tmp/nfspath")
- params = dict(flavor_id=1,
- sr_uuid=None,
- sr_type='nfs',
- config_params=config_params)
- return params
-
-
-def _get_sm_flavor_params():
- params = dict(label="gold",
- description="automatic backups")
- return params
-
-
-class SMVolumeDBApiTestCase(test.TestCase):
+class VolumeUsageDBApiTestCase(test.TestCase):
def setUp(self):
- super(SMVolumeDBApiTestCase, self).setUp()
+ super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- def test_sm_backend_conf_create(self):
- params = _get_sm_backend_params()
- ctxt = context.get_admin_context()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- self.assertIsInstance(beconf['id'], int)
-
- def test_sm_backend_conf_create_raise_duplicate(self):
- params = _get_sm_backend_params()
- ctxt = context.get_admin_context()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- self.assertIsInstance(beconf['id'], int)
- self.assertRaises(exception.Duplicate,
- db.sm_backend_conf_create,
- ctxt,
- params)
-
- def test_sm_backend_conf_update(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- beconf = db.sm_backend_conf_update(ctxt,
- beconf['id'],
- dict(sr_uuid="FA15E-1D"))
- self.assertEqual(beconf['sr_uuid'], "FA15E-1D")
-
- def test_sm_backend_conf_update_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_update,
- ctxt,
- 7,
- dict(sr_uuid="FA15E-1D"))
-
- def test_sm_backend_conf_get(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- val = db.sm_backend_conf_get(ctxt, beconf['id'])
- self.assertDictMatch(dict(val), dict(beconf))
-
- def test_sm_backend_conf_get_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get,
- ctxt,
- 7)
-
- def test_sm_backend_conf_get_by_sr(self):
+ def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- val = db.sm_backend_conf_get_by_sr(ctxt, beconf['sr_uuid'])
- self.assertDictMatch(dict(val), dict(beconf))
-
- def test_sm_backend_conf_get_by_sr_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get_by_sr,
- ctxt,
- "FA15E-1D")
-
- def test_sm_backend_conf_delete(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- db.sm_backend_conf_delete(ctxt, beconf['id'])
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get,
- ctxt,
- beconf['id'])
-
- def test_sm_backend_conf_delete_nonexisting(self):
- ctxt = context.get_admin_context()
- db.sm_backend_conf_delete(ctxt, "FA15E-1D")
-
- def test_sm_flavor_create(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- self.assertIsInstance(flav['id'], int)
-
- def sm_flavor_create_raise_duplicate(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- self.assertRaises(exception.Duplicate,
- db.sm_flavor_create,
- params)
-
- def test_sm_flavor_update(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- newparms = dict(description="basic volumes")
- flav = db.sm_flavor_update(ctxt, flav['id'], newparms)
- self.assertEqual(flav['description'], "basic volumes")
-
- def test_sm_flavor_update_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_update,
- ctxt,
- 7,
- dict(description="fakedesc"))
-
- def test_sm_flavor_delete(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- db.sm_flavor_delete(ctxt, flav['id'])
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- "gold")
-
- def test_sm_flavor_delete_nonexisting(self):
- ctxt = context.get_admin_context()
- db.sm_flavor_delete(ctxt, 7)
-
- def test_sm_flavor_get(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- val = db.sm_flavor_get(ctxt, flav['id'])
- self.assertDictMatch(dict(val), dict(flav))
-
- def test_sm_flavor_get_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- 7)
-
- def test_sm_flavor_get_by_label(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- val = db.sm_flavor_get_by_label(ctxt, flav['label'])
- self.assertDictMatch(dict(val), dict(flav))
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ start_time = now - datetime.timedelta(seconds=10)
+ refreshed_time = now - datetime.timedelta(seconds=5)
+
+ expected_vol_usages = [{'volume_id': u'1',
+ 'curr_reads': 1000,
+ 'curr_read_bytes': 2000,
+ 'curr_writes': 3000,
+ 'curr_write_bytes': 4000},
+ {'volume_id': u'2',
+ 'curr_reads': 100,
+ 'curr_read_bytes': 200,
+ 'curr_writes': 300,
+ 'curr_write_bytes': 400}]
+
+ def _compare(vol_usage, expected):
+ for key, value in expected.items():
+ self.assertEqual(vol_usage[key], value)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 0)
+
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
+ wr_req=30, wr_bytes=40, instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
+ wr_req=3000, wr_bytes=4000,
+ instance_id=1,
+ last_refreshed=refreshed_time)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 2)
+ _compare(vol_usages[0], expected_vol_usages[0])
+ _compare(vol_usages[1], expected_vol_usages[1])
+ timeutils.clear_time_override()
- def test_sm_flavor_get_by_label_raise_notfound(self):
+ def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- "fake")
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ start_time = now - datetime.timedelta(seconds=10)
+ expected_vol_usages = {'volume_id': u'1',
+ 'tot_reads': 600,
+ 'tot_read_bytes': 800,
+ 'tot_writes': 1000,
+ 'tot_write_bytes': 1200,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0}
+
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
+ wr_req=400, wr_bytes=500,
+ instance_id=1,
+ update_totals=True)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
+ wr_req=500, wr_bytes=600,
+ instance_id=1)
+ vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
+ wr_req=600, wr_bytes=700,
+ instance_id=1,
+ update_totals=True)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+
+ self.assertEquals(1, len(vol_usages))
+ for key, value in expected_vol_usages.items():
+ self.assertEqual(vol_usages[0][key], value)
+ timeutils.clear_time_override()
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index f7e4bc037..ad67cff26 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -52,23 +52,23 @@ class FakeNotifier(object):
self.provided_context = context
-def good_function():
+def good_function(self, context):
return 99
-def bad_function_exception(blah="a", boo="b", context=None):
+def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None):
raise test.TestingException()
class WrapExceptionTestCase(test.TestCase):
def test_wrap_exception_good_return(self):
wrapped = exception.wrap_exception()
- self.assertEquals(99, wrapped(good_function)())
+ self.assertEquals(99, wrapped(good_function)(1, 2))
def test_wrap_exception_throws_exception(self):
wrapped = exception.wrap_exception()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
def test_wrap_exception_with_notifier(self):
notifier = FakeNotifier()
@@ -76,7 +76,7 @@ class WrapExceptionTestCase(test.TestCase):
"level")
ctxt = context.get_admin_context()
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception), context=ctxt)
+ wrapped(bad_function_exception), 1, ctxt, 3, zoo=3)
self.assertEquals(notifier.provided_publisher, "publisher")
self.assertEquals(notifier.provided_event, "event")
self.assertEquals(notifier.provided_priority, "level")
@@ -88,7 +88,7 @@ class WrapExceptionTestCase(test.TestCase):
notifier = FakeNotifier()
wrapped = exception.wrap_exception(notifier)
self.assertRaises(test.TestingException,
- wrapped(bad_function_exception))
+ wrapped(bad_function_exception), 1, 2, 3)
self.assertEquals(notifier.provided_publisher, None)
self.assertEquals(notifier.provided_event, "bad_function_exception")
self.assertEquals(notifier.provided_priority, notifier.ERROR)
@@ -117,8 +117,8 @@ class NovaExceptionTestCase(test.TestCase):
class FakeNovaException(exception.NovaException):
message = "default message: %(mispelled_code)s"
- exc = FakeNovaException(code=500)
- self.assertEquals(unicode(exc), 'default message: %(mispelled_code)s')
+ exc = FakeNovaException(code=500, mispelled_code='blah')
+ self.assertEquals(unicode(exc), 'default message: blah')
def test_default_error_code(self):
class FakeNovaException(exception.NovaException):
diff --git a/nova/tests/test_filters.py b/nova/tests/test_filters.py
new file mode 100644
index 000000000..13fd122c8
--- /dev/null
+++ b/nova/tests/test_filters.py
@@ -0,0 +1,125 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import inspect
+import sys
+
+from nova import filters
+from nova import loadables
+from nova import test
+
+
+class Filter1(filters.BaseFilter):
+ """Test Filter class #1."""
+ pass
+
+
+class Filter2(filters.BaseFilter):
+ """Test Filter class #2."""
+ pass
+
+
+class FiltersTestCase(test.TestCase):
+ def test_filter_all(self):
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ result = base_filter.filter_all(filter_obj_list, filter_properties)
+ self.assertTrue(inspect.isgenerator(result))
+ self.assertEqual(list(result), ['obj1', 'obj3'])
+
+ def test_filter_all_recursive_yields(self):
+ # Test filter_all() allows generators from previous filter_all()s.
+ # filter_all() yields results. We want to make sure that we can
+ # call filter_all() with generators returned from previous calls
+ # to filter_all().
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ total_iterations = 200
+
+ # The order that _filter_one is going to get called gets
+ # confusing because we will be recursively yielding things..
+ # We are going to simulate the first call to filter_all()
+ # returning False for 'obj2'. So, 'obj1' will get yielded
+ # 'total_iterations' number of times before the first filter_all()
+ # call gets to processing 'obj2'. We then return 'False' for it.
+ # After that, 'obj3' gets yielded 'total_iterations' number of
+ # times.
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+ self.mox.ReplayAll()
+
+ objs = iter(filter_obj_list)
+ for x in xrange(total_iterations):
+ # Pass in generators returned from previous calls.
+ objs = base_filter.filter_all(objs, filter_properties)
+ self.assertTrue(inspect.isgenerator(objs))
+ self.assertEqual(list(objs), ['obj1', 'obj3'])
+
+ def test_get_filtered_objects(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_objs_last = ['last', 'filter3', 'objects3']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+ filt2_mock.filter_all(filter_objs_second,
+ filter_properties).AndReturn(filter_objs_last)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertEqual(result, filter_objs_last)
diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py
deleted file mode 100644
index ad94f6550..000000000
--- a/nova/tests/test_flags.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import config
-from nova import flags
-from nova.openstack.common import cfg
-from nova import test
-
-CONF = config.CONF
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.StrOpt('flags_unittest',
- default='foo',
- help='for testing purposes only'))
-
-
-class FlagsTestCase(test.TestCase):
- def test_declare(self):
- self.assert_('answer' not in CONF)
- CONF.import_opt('answer', 'nova.tests.declare_flags')
- self.assert_('answer' in CONF)
- self.assertEqual(CONF.answer, 42)
-
- # Make sure we don't overwrite anything
- CONF.set_override('answer', 256)
- self.assertEqual(CONF.answer, 256)
- CONF.import_opt('answer', 'nova.tests.declare_flags')
- self.assertEqual(CONF.answer, 256)
-
- def test_getopt_non_interspersed_args(self):
- self.assert_('runtime_answer' not in FLAGS)
-
- argv = ['flags_test', 'extra_arg', '--runtime_answer=60']
- args = config.parse_args(argv, default_config_files=[])
- self.assertEqual(len(args), 3)
- self.assertEqual(argv, args)
-
- def test_runtime_and_unknown_flags(self):
- self.assert_('runtime_answer' not in FLAGS)
- import nova.tests.runtime_flags
- self.assert_('runtime_answer' in FLAGS)
- self.assertEqual(FLAGS.runtime_answer, 54)
-
- def test_long_vs_short_flags(self):
- FLAGS.clear()
- FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
- default='val',
- help='desc'))
- argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
- args = config.parse_args(argv, default_config_files=[])
-
- self.assert_('duplicate_answer' not in FLAGS)
- self.assert_(FLAGS.duplicate_answer_long, 60)
-
- FLAGS.clear()
- FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
- default=60, help='desc'))
- args = config.parse_args(argv, default_config_files=[])
- self.assertEqual(FLAGS.duplicate_answer, 60)
- self.assertEqual(FLAGS.duplicate_answer_long, 'val')
-
- def test_flag_leak_left(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
- self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
-
- def test_flag_leak_right(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
- self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
-
- def test_flag_overrides(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
- self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
- FLAGS.reset()
- self.assertEqual(FLAGS.flags_unittest, 'foo')
diff --git a/nova/tests/test_hooks.py b/nova/tests/test_hooks.py
new file mode 100644
index 000000000..0b61d6924
--- /dev/null
+++ b/nova/tests/test_hooks.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for hook customization."""
+
+import stevedore
+
+from nova import hooks
+from nova import test
+
+
+class SampleHookA(object):
+ name = "a"
+
+ def _add_called(self, op, kwargs):
+ called = kwargs.get('called', None)
+ if called is not None:
+ called.append(op + self.name)
+
+ def pre(self, *args, **kwargs):
+ self._add_called("pre", kwargs)
+
+
+class SampleHookB(SampleHookA):
+ name = "b"
+
+ def post(self, rv, *args, **kwargs):
+ self._add_called("post", kwargs)
+
+
+class MockEntryPoint(object):
+
+ def __init__(self, cls):
+ self.cls = cls
+
+ def load(self):
+ return self.cls
+
+
+class HookTestCase(test.TestCase):
+
+ def _mock_load_plugins(self, iload, iargs, ikwargs):
+ return [
+ stevedore.extension.Extension('test_hook',
+ MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
+ stevedore.extension.Extension('test_hook',
+ MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
+ ]
+
+ def setUp(self):
+ super(HookTestCase, self).setUp()
+
+ hooks.reset()
+
+ self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
+ self._mock_load_plugins)
+
+ @hooks.add_hook('test_hook')
+ def _hooked(self, a, b=1, c=2, called=None):
+ return 42
+
+ def test_basic(self):
+ self.assertEqual(42, self._hooked(1))
+
+ mgr = hooks._HOOKS['test_hook']
+ self.assertEqual(2, len(mgr.extensions))
+ self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
+ self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
+
+ def test_order_of_execution(self):
+ called_order = []
+ self._hooked(42, called=called_order)
+ self.assertEqual(['prea', 'preb', 'postb'], called_order)
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 6d2396350..9fec9d151 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -18,6 +18,7 @@
Test suite for the Hyper-V driver and related APIs.
"""
+import json
import os
import platform
import shutil
@@ -25,21 +26,25 @@ import sys
import uuid
from nova.compute import power_state
+from nova.compute import task_states
from nova import context
from nova import db
-from nova import flags
from nova.image import glance
+from nova.openstack.common import cfg
from nova.tests import fake_network
from nova.tests.hyperv import basetestcase
from nova.tests.hyperv import db_fakes
from nova.tests.hyperv import hypervutils
from nova.tests.hyperv import mockproxy
import nova.tests.image.fake as fake_image
+from nova.tests import matchers
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import vmutils
from nova.virt import images
+CONF = cfg.CONF
+
class HyperVAPITestCase(basetestcase.BaseTestCase):
"""Unit tests for Hyper-V driver calls."""
@@ -56,14 +61,15 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._update_image_raise_exception = False
self._post_method_called = False
self._recover_method_called = False
- self._volume_target_portal = '192.168.1.112:3260'
- self._volume_id = '10958016-e196-42e3-9e7f-5d8927ae3099'
+ self._volume_target_portal = 'testtargetportal:3260'
+ self._volume_id = '8957e088-dbee-4216-8056-978353a3e737'
self._context = context.RequestContext(self._user_id, self._project_id)
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
- vswitch_name='external')
+ vswitch_name='external',
+ network_api_class='nova.network.quantumv2.api.API')
self._hypervutils = hypervutils.HyperVUtils()
self._conn = driver_hyperv.HyperVDriver(None)
@@ -71,6 +77,11 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def fake_dumps(msg, default=None, **kwargs):
+ return '""'
+ self.stubs.Set(json, 'dumps', fake_dumps)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
@@ -96,31 +107,40 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
'shutil',
'uuid',
'time',
- 'subprocess',
'multiprocessing',
- '_winreg'
+ '_winreg',
+ 'nova.virt.configdrive',
+ 'nova.utils',
+ 'ctypes'
]
# Modules in which the mocks are going to be injected
from nova.virt.hyperv import baseops
+ from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import snapshotops
+ from nova.virt.hyperv import vif
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
+ from nova.virt.hyperv import volumeutilsV2
modules_to_test = [
driver_hyperv,
+ basevolumeutils,
baseops,
hostops,
+ vif,
vmops,
vmutils,
volumeops,
volumeutils,
+ volumeutilsV2,
snapshotops,
livemigrationops,
hypervutils,
+ db_fakes,
sys.modules[__name__]
]
@@ -148,14 +168,14 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._hypervutils.logout_iscsi_volume_sessions(self._volume_id)
- shutil.rmtree(flags.FLAGS.instances_path, True)
+ shutil.rmtree(CONF.instances_path, True)
fake_image.FakeImageService_reset()
finally:
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
- dic = self._conn.get_available_resource()
+ dic = self._conn.get_available_resource(None)
self.assertEquals(dic['hypervisor_hostname'], platform.node())
@@ -177,7 +197,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._spawn_instance(True)
info = self._conn.get_info(self._instance_data)
- self.assertEquals(info["state"], str(power_state.RUNNING))
+ self.assertEquals(info["state"], power_state.RUNNING)
def test_spawn_cow_image(self):
self._test_spawn_instance(True)
@@ -185,7 +205,47 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
+ def test_spawn_config_drive(self):
+ self.skip('broken by move to contextlib for configdrive')
+
+ self.flags(force_config_drive=True)
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ self._spawn_instance(True)
+
+ (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 0)
+ self.assertEquals(len(vhd_paths), 2)
+
+ def test_spawn_config_drive_cdrom(self):
+ self.skip('broken by move to contextlib for configdrive')
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=True)
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ self._spawn_instance(True)
+
+ (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 1)
+ self.assertEquals(len(vhd_paths), 1)
+ self.assertTrue(os.path.exists(dvd_paths[0]))
+
+ def test_spawn_no_config_drive(self):
+ self.flags(force_config_drive=False)
+
+ self._spawn_instance(True)
+
+ (_, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 0)
+
def test_spawn_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
# Set flag to a non existing vswitch
self.flags(vswitch_name=str(uuid.uuid4()))
self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
@@ -231,12 +291,12 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
- self._test_vm_state_change(self._conn.resume,
+ self._test_vm_state_change(lambda i: self._conn.resume(i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
- self._test_vm_state_change(self._conn.resume, None,
+ self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
@@ -269,7 +329,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_destroy(self):
self._spawn_instance(True)
- (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self._conn.destroy(self._instance_data)
@@ -286,7 +346,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._spawn_instance(False)
# Existing server
- self._dest_server = "HV12RCTest1"
+ self._dest_server = "HV12OSDEMO2"
self._live_migration(self._dest_server)
@@ -355,27 +415,55 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self._spawn_instance(True)
self._update_image_raise_exception = True
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
- self._context, self._instance_data, snapshot_name)
+ self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+
+ # assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
def test_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self._spawn_instance(True)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
- self._conn.snapshot(self._context, self._instance_data, snapshot_name)
+ self._conn.snapshot(self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
self.assertTrue(self._image_metadata and
"disk_format" in self._image_metadata and
self._image_metadata["disk_format"] == "vhd")
+ # assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
@@ -408,7 +496,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
- (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(vhd_paths), 1)
@@ -426,12 +514,12 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._volume_target_portal, self._volume_id)
self._conn.attach_volume(connection_info,
- self._instance_data["name"], '/dev/sdc')
+ self._instance_data, '/dev/sdc')
def test_attach_volume(self):
self._attach_volume()
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
@@ -445,9 +533,9 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._volume_target_portal, self._volume_id)
self._conn.detach_volume(connection_info,
- self._instance_data["name"], '/dev/sdc')
+ self._instance_data, '/dev/sdc')
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 0)
@@ -461,7 +549,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._spawn_instance(False, block_device_info)
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
@@ -469,13 +557,3 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
self._volume_id)
self.assertTrue(sessions_exist)
-
- def test_attach_volume_with_target_connection_failure(self):
- self._spawn_instance(True)
-
- target = 'nonexistingtarget:3260'
- connection_info = db_fakes.get_fake_volume_info_data(target,
- self._volume_id)
-
- self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
- connection_info, self._instance_data["name"], '/dev/sdc')
diff --git a/nova/tests/test_image_utils.py b/nova/tests/test_image_utils.py
index fac0422bf..a9768f821 100644
--- a/nova/tests/test_image_utils.py
+++ b/nova/tests/test_image_utils.py
@@ -14,13 +14,106 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
from nova import test
from nova import utils
from nova.virt import images
+from nova.virt.libvirt import utils as libvirt_utils
class ImageUtilsTestCase(test.TestCase):
+ def test_disk_type(self):
+ # Seems like lvm detection
+ # if its in /dev ??
+ for p in ['/dev/b', '/dev/blah/blah']:
+ d_type = libvirt_utils.get_disk_type(p)
+ self.assertEquals('lvm', d_type)
+ # Try the other types
+ template_output = """image: %(path)s
+file format: %(format)s
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ path = '/myhome/disk.config'
+ for f in ['raw', 'qcow2']:
+ output = template_output % ({
+ 'format': f,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_type = libvirt_utils.get_disk_type(path)
+ self.assertEquals(f, d_type)
+ self.mox.UnsetStubs()
+
+ def test_disk_backing(self):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: 2K (2048 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ output = template_output % ({
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_backing = libvirt_utils.get_disk_backing_file(path)
+ self.assertEquals(None, d_backing)
+
+ def test_disk_size(self):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: %(v_size)s (%(vsize_b)s bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ for i in range(0, 128):
+ bytes = i * 65336
+ kbytes = bytes / 1024
+ mbytes = kbytes / 1024
+ output = template_output % ({
+ 'v_size': "%sM" % (mbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEquals(i, d_size)
+ self.mox.UnsetStubs()
+ output = template_output % ({
+ 'v_size': "%sK" % (kbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEquals(i, d_size)
+ self.mox.UnsetStubs()
+
def test_qemu_info_canon(self):
path = "disk.config"
example_output = """image: disk.config
@@ -30,7 +123,9 @@ cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
@@ -50,7 +145,9 @@ cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
@@ -75,7 +172,9 @@ ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
@@ -101,7 +200,9 @@ ID TAG VM SIZE DATE VM CLOCK
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
@@ -123,7 +224,9 @@ ID TAG VM SIZE DATE VM CLOCK
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 088cb0a82..a9865cb44 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -15,18 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
import os
-from nova import flags
-from nova.openstack.common import fileutils
+from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import utils as libvirt_utils
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
-class _ImageTestCase(test.TestCase):
+class _ImageTestCase(object):
INSTANCES_PATH = '/fake'
def mock_create_image(self, image):
@@ -38,17 +39,19 @@ class _ImageTestCase(test.TestCase):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
- self.INSTANCE = 'instance'
+ self.INSTANCE = {'name': 'instance'}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
- self.PATH = os.path.join(FLAGS.instances_path, self.INSTANCE,
- self.NAME)
- self.TEMPLATE_DIR = os.path.join(FLAGS.instances_path,
+ self.PATH = os.path.join(
+ libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
+ self.TEMPLATE_DIR = os.path.join(CONF.instances_path,
'_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
- imagebackend.libvirt_utils = fake_libvirt_utils
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
@@ -108,7 +111,7 @@ class _ImageTestCase(test.TestCase):
self.mox.VerifyAll()
-class RawTestCase(_ImageTestCase):
+class RawTestCase(_ImageTestCase, test.TestCase):
SIZE = 1024
@@ -158,7 +161,7 @@ class RawTestCase(_ImageTestCase):
self.mox.VerifyAll()
-class Qcow2TestCase(_ImageTestCase):
+class Qcow2TestCase(_ImageTestCase, test.TestCase):
SIZE = 1024 * 1024 * 1024
def setUp(self):
@@ -193,26 +196,9 @@ class Qcow2TestCase(_ImageTestCase):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.QCOW2_BASE).AndReturn(False)
- imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
- self.QCOW2_BASE)
- imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE)
- imagebackend.libvirt_utils.create_cow_image(self.QCOW2_BASE,
- self.PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_create_image_with_size_template_exists(self):
- fn = self.prepare_mocks()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.QCOW2_BASE).AndReturn(True)
- imagebackend.libvirt_utils.create_cow_image(self.QCOW2_BASE,
+ imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
+ imagebackend.disk.extend(self.PATH, self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
@@ -221,7 +207,7 @@ class Qcow2TestCase(_ImageTestCase):
self.mox.VerifyAll()
-class LvmTestCase(_ImageTestCase):
+class LvmTestCase(_ImageTestCase, test.TestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
@@ -230,7 +216,7 @@ class LvmTestCase(_ImageTestCase):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
- self.LV = '%s_%s' % (self.INSTANCE, self.NAME)
+ self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
@@ -357,7 +343,7 @@ class LvmTestCase(_ImageTestCase):
class BackendTestCase(test.TestCase):
- INSTANCE = 'fake-instance'
+ INSTANCE = {'name': 'fake-instance'}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 8203277ae..eaf244c56 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -19,24 +19,26 @@
import contextlib
import cStringIO
import hashlib
+import json
import logging
import os
import time
from nova import test
-from nova.compute import manager as compute_manager
from nova.compute import vm_states
+from nova import conductor
from nova import db
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as virtutils
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
@@ -52,7 +54,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_read_stored_checksum_missing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
- csum = imagecache.read_stored_checksum('/tmp/foo')
+ csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertEquals(csum, None)
def test_read_stored_checksum(self):
@@ -63,12 +65,13 @@ class ImageCacheManagerTestCase(test.TestCase):
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
- info_fname = virtutils.get_info_filename(fname)
+ info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
- csum_output = imagecache.read_stored_checksum(fname)
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
self.assertEquals(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
@@ -84,10 +87,12 @@ class ImageCacheManagerTestCase(test.TestCase):
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
- csum_output = imagecache.read_stored_checksum(fname)
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
self.assertEquals(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
- self.assertTrue(os.path.exists(virtutils.get_info_filename(fname)))
+ info_fname = imagecache.get_info_filename(fname)
+ self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
@@ -147,13 +152,13 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_list_running_instances(self):
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-2',
'uuid': '456',
'vm_state': '',
@@ -182,7 +187,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_list_resizing_instances(self):
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': vm_states.RESIZED,
@@ -209,7 +214,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
@@ -231,7 +236,7 @@ class ImageCacheManagerTestCase(test.TestCase):
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
@@ -252,7 +257,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
@@ -348,11 +353,10 @@ class ImageCacheManagerTestCase(test.TestCase):
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
- info_fname = virtutils.get_info_filename(fname)
+ info_fname = imagecache.get_info_filename(fname)
- f = open(fname, 'w')
- f.write(testdata)
- f.close()
+ with open(fname, 'w') as f:
+ f.write(testdata)
return fname, info_fname, testdata
@@ -479,26 +483,23 @@ class ImageCacheManagerTestCase(test.TestCase):
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
- img = {'container_format': 'ami', 'id': '42'}
-
self.flags(checksum_base_images=True)
- with self._intercept_log_messages() as stream:
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'))
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'))
- fname, info_fname, testdata = self._make_checksum(tmpdir)
+ fname, info_fname, testdata = self._make_checksum(tmpdir)
- # Checksum file missing
- image_cache_manager = imagecache.ImageCacheManager()
- res = image_cache_manager._verify_checksum(img, fname)
- self.assertEquals(res, None)
+ # Checksum file missing
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = image_cache_manager._verify_checksum('aaa', fname)
+ self.assertEquals(res, None)
- # Checksum requests for a file with no checksum now have the
- # side effect of creating the checksum
- self.assertTrue(os.path.exists(info_fname))
+ # Checksum requests for a file with no checksum now have the
+ # side effect of creating the checksum
+ self.assertTrue(os.path.exists(info_fname))
@contextlib.contextmanager
def _make_base_file(self, checksum=True):
@@ -525,7 +526,7 @@ class ImageCacheManagerTestCase(test.TestCase):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
- info_fname = virtutils.get_info_filename(fname)
+ info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
@@ -543,7 +544,7 @@ class ImageCacheManagerTestCase(test.TestCase):
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
- info_fname = virtutils.get_info_filename(fname)
+ info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
@@ -627,22 +628,22 @@ class ImageCacheManagerTestCase(test.TestCase):
self.assertEquals(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used_remotely(self):
+ self.stubs.Set(virtutils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
- image_cache_manager._handle_base_image(img, None)
+ image_cache_manager._handle_base_image(img, fname)
self.assertEquals(image_cache_manager.unexplained_images, [])
self.assertEquals(image_cache_manager.removable_base_files, [])
self.assertEquals(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_absent(self):
- """Ensure we warn for use of a missing base image."""
-
img = '123'
with self._intercept_log_messages() as stream:
@@ -682,9 +683,12 @@ class ImageCacheManagerTestCase(test.TestCase):
img = '123'
with self._make_base_file() as fname:
- f = open(fname, 'w')
- f.write('banana')
- f.close()
+ with open(fname, 'w') as f:
+ f.write('banana')
+
+ d = {'sha1': '21323454'}
+ with open('%s.info' % fname, 'w') as f:
+ f.write(json.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
@@ -759,7 +763,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def listdir(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
- return orig_list(path)
+ return orig_listdir(path)
if path == '/instance_path':
return ['instance-1', 'instance-2', 'instance-3', '_base']
@@ -789,13 +793,13 @@ class ImageCacheManagerTestCase(test.TestCase):
# Fake the database call which lists running instances
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
@@ -868,14 +872,15 @@ class ImageCacheManagerTestCase(test.TestCase):
self.flags(instances_path='/tmp/no/such/dir/name/please')
self.flags(image_info_filename_pattern=('$instances_path/_base/'
'%(image)s.info'))
- base_filename = os.path.join(FLAGS.instances_path, '_base', hashed)
+ base_filename = os.path.join(CONF.instances_path, '_base', hashed)
- self.assertFalse(virtutils.is_valid_info_file('banana'))
- self.assertFalse(virtutils.is_valid_info_file(
- os.path.join(FLAGS.instances_path, '_base', '00000001')))
- self.assertFalse(virtutils.is_valid_info_file(base_filename))
- self.assertFalse(virtutils.is_valid_info_file(base_filename + '.sha1'))
- self.assertTrue(virtutils.is_valid_info_file(base_filename + '.info'))
+ is_valid_info_file = imagecache.is_valid_info_file
+ self.assertFalse(is_valid_info_file('banana'))
+ self.assertFalse(is_valid_info_file(
+ os.path.join(CONF.instances_path, '_base', '00000001')))
+ self.assertFalse(is_valid_info_file(base_filename))
+ self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
+ self.assertTrue(is_valid_info_file(base_filename + '.info'))
def test_configured_checksum_path(self):
with utils.tempdir() as tmpdir:
@@ -889,13 +894,13 @@ class ImageCacheManagerTestCase(test.TestCase):
# Fake the database call which lists running instances
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
@@ -924,22 +929,27 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_compute_manager(self):
was = {'called': False}
- def fake_get_all(context):
+ def fake_get_all(context, *args, **kwargs):
was['called'] = True
return [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
- self.stubs.Set(db, 'instance_get_all', fake_get_all)
- compute = importutils.import_object(FLAGS.compute_manager)
- compute._run_image_cache_manager_pass(None)
- self.assertTrue(was['called'])
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ self.stubs.Set(db, 'instance_get_all', fake_get_all)
+ compute = importutils.import_object(CONF.compute_manager)
+ self.flags(use_local=True, group='conductor')
+ compute.conductor_api = conductor.API()
+ compute._run_image_cache_manager_pass(None)
+ self.assertTrue(was['called'])
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 43d698374..4a136cf13 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -23,18 +23,16 @@ from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import session as sql_session
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import test
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class InstanceTypeTestCase(test.TestCase):
- """Test cases for instance type code"""
+ """Test cases for instance type code."""
def _generate_name(self):
- """return a name not in the DB"""
+ """return a name not in the DB."""
nonexistent_flavor = str(int(time.time()))
flavors = instance_types.get_all_types()
while nonexistent_flavor in flavors:
@@ -43,7 +41,7 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _generate_flavorid(self):
- """return a flavorid not in the DB"""
+ """return a flavorid not in the DB."""
nonexistent_flavor = 2700
flavor_ids = [value["id"] for key, value in
instance_types.get_all_types().iteritems()]
@@ -53,11 +51,11 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _existing_flavor(self):
- """return first instance type name"""
+ """return first instance type name."""
return instance_types.get_all_types().keys()[0]
def test_instance_type_create(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created.
name = 'Instance create test'
flavor_id = '512'
@@ -73,7 +71,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(inst_type['root_gb'], 120)
self.assertEqual(inst_type['ephemeral_gb'], 0)
self.assertEqual(inst_type['swap'], 0)
- self.assertEqual(inst_type['rxtx_factor'], 1)
+ self.assertEqual(inst_type['rxtx_factor'], 1.0)
# make sure new type shows up in list
new_list = instance_types.get_all_types()
@@ -81,7 +79,7 @@ class InstanceTypeTestCase(test.TestCase):
'instance type was not created')
def test_instance_type_create_then_delete(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created.
name = 'Small Flavor'
flavorid = 'flavor1'
@@ -97,7 +95,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(inst_type['root_gb'], 120)
self.assertEqual(inst_type['ephemeral_gb'], 100)
self.assertEqual(inst_type['swap'], 0)
- self.assertEqual(inst_type['rxtx_factor'], 1)
+ self.assertEqual(inst_type['rxtx_factor'], 1.0)
# make sure new type shows up in list
new_list = instance_types.get_all_types()
@@ -122,24 +120,37 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(inst_type['root_gb'], 120)
self.assertEqual(inst_type['ephemeral_gb'], 100)
self.assertEqual(inst_type['swap'], 0)
- self.assertEqual(inst_type['rxtx_factor'], 1)
+ self.assertEqual(inst_type['rxtx_factor'], 1.0)
+
+ def test_instance_type_create_with_custom_rxtx_factor(self):
+ name = 'Custom RXTX Factor'
+ inst_type = instance_types.create(name, 256, 1, 120, 100,
+ rxtx_factor=9.9)
+ self.assertNotEqual(inst_type['flavorid'], None)
+ self.assertEqual(inst_type['name'], name)
+ self.assertEqual(inst_type['memory_mb'], 256)
+ self.assertEqual(inst_type['vcpus'], 1)
+ self.assertEqual(inst_type['root_gb'], 120)
+ self.assertEqual(inst_type['ephemeral_gb'], 100)
+ self.assertEqual(inst_type['swap'], 0)
+ self.assertEqual(inst_type['rxtx_factor'], 9.9)
def test_instance_type_create_with_special_characters(self):
- """Ensure instance types raises InvalidInput for invalid characters"""
+ # Ensure instance types raises InvalidInput for invalid characters.
name = "foo.bar!@#$%^-test_name"
flavorid = "flavor1"
self.assertRaises(exception.InvalidInput, instance_types.create,
name, 256, 1, 120, 100, flavorid)
def test_get_all_instance_types(self):
- """Ensures that all instance types can be retrieved"""
+ # Ensures that all instance types can be retrieved.
session = sql_session.get_session()
total_instance_types = session.query(models.InstanceTypes).count()
inst_types = instance_types.get_all_types()
self.assertEqual(total_instance_types, len(inst_types))
def test_invalid_create_args_should_fail(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that instance type creation fails with invalid args.
invalid_sigs = [
(('Zero memory', 0, 1, 10, 20, 'flavor1'), {}),
(('Negative memory', -256, 1, 10, 20, 'flavor1'), {}),
@@ -166,13 +177,13 @@ class InstanceTypeTestCase(test.TestCase):
instance_types.create, *args, **kwargs)
def test_non_existent_inst_type_shouldnt_delete(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that instance type creation fails with invalid args.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy,
'unknown_flavor')
def test_duplicate_names_fail(self):
- """Ensures that name duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that name duplicates raise InstanceTypeCreateFailed.
name = 'some_name'
instance_types.create(name, 256, 1, 120, 200, 'flavor1')
self.assertRaises(exception.InstanceTypeExists,
@@ -180,7 +191,7 @@ class InstanceTypeTestCase(test.TestCase):
name, 256, 1, 120, 200, 'flavor2')
def test_duplicate_flavorids_fail(self):
- """Ensures that flavorid duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that flavorid duplicates raise InstanceTypeCreateFailed.
flavorid = 'flavor1'
instance_types.create('name one', 256, 1, 120, 200, flavorid)
self.assertRaises(exception.InstanceTypeIdExists,
@@ -188,12 +199,12 @@ class InstanceTypeTestCase(test.TestCase):
'name two', 256, 1, 120, 200, flavorid)
def test_will_not_destroy_with_no_name(self):
- """Ensure destroy said path of no name raises error"""
+ # Ensure destroy said path of no name raises error.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy, None)
def test_will_not_get_bad_default_instance_type(self):
- """ensures error raised on bad default instance type"""
+ # ensures error raised on bad default instance type.
self.flags(default_instance_type='unknown_flavor')
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_default_instance_type)
@@ -205,28 +216,28 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_will_not_get_instance_type_by_unknown_id(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by name returns default flavor with no name.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 10000)
def test_will_not_get_instance_type_with_bad_id(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get by name returns default flavor with bad name.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 'asdf')
def test_instance_type_get_by_None_name_returns_default(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by name returns default flavor with no name.
default = instance_types.get_default_instance_type()
actual = instance_types.get_instance_type_by_name(None)
self.assertEqual(default, actual)
def test_will_not_get_instance_type_with_bad_name(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get by name returns default flavor with bad name.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type_by_name, 10000)
def test_will_not_get_instance_by_unknown_flavor_id(self):
- """Ensure get by flavor raises error with wrong flavorid"""
+ # Ensure get by flavor raises error with wrong flavorid.
self.assertRaises(exception.FlavorNotFound,
instance_types.get_instance_type_by_flavor_id,
'unknown_flavor')
@@ -238,7 +249,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_can_read_deleted_types_using_flavor_id(self):
- """Ensure deleted instance types can be read when querying flavor_id"""
+ # Ensure deleted instance types can be read when querying flavor_id.
inst_type_name = "test"
inst_type_flavor_id = "test1"
@@ -269,7 +280,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual("instance_type1_redo", instance_type["name"])
def test_will_list_deleted_type_for_active_instance(self):
- """Ensure deleted instance types with active instances can be read"""
+ # Ensure deleted instance types with active instances can be read.
ctxt = context.get_admin_context()
inst_type = instance_types.create("test", 256, 1, 120, 100, "test1")
@@ -288,7 +299,7 @@ class InstanceTypeTestCase(test.TestCase):
class InstanceTypeFilteringTest(test.TestCase):
- """Test cases for the filter option available for instance_type_get_all"""
+ """Test cases for the filter option available for instance_type_get_all."""
def setUp(self):
super(InstanceTypeFilteringTest, self).setUp()
self.context = context.get_admin_context()
@@ -306,19 +317,19 @@ class InstanceTypeFilteringTest(test.TestCase):
self.assertFilterResults(filters, expected)
def test_min_memory_mb_filter(self):
- """Exclude tiny instance which is 512 MB"""
+ # Exclude tiny instance which is 512 MB.
filters = dict(min_memory_mb=513)
expected = ['m1.large', 'm1.medium', 'm1.small', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_root_gb=80)
expected = ['m1.large', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_memory_mb_AND_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_memory_mb=16384, min_root_gb=80)
expected = ['m1.xlarge']
self.assertFilterResults(filters, expected)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index ea35ff29e..de0745654 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -18,6 +18,7 @@
import copy
import errno
import eventlet
+import fixtures
import json
import mox
import os
@@ -31,12 +32,13 @@ from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -45,21 +47,21 @@ from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
import nova.tests.image.fake
+from nova.tests import matchers
from nova import utils
+from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
-from nova.virt.libvirt import snapshots
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume
from nova.virt.libvirt import volume_nfs
-from nova.volume import driver as volume_driver
try:
@@ -69,7 +71,11 @@ except ImportError:
libvirt_driver.libvirt = libvirt
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
LOG = logging.getLogger(__name__)
_fake_network_info = fake_network.fake_get_instance_nw_info
@@ -77,7 +83,8 @@ _fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
-def _concurrency(wait, done, target):
+def _concurrency(signal, wait, done, target):
+ signal.send()
wait.wait()
done.send()
@@ -172,19 +179,26 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./serial').text, 'fake_serial')
+ def iscsi_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
def test_libvirt_iscsi_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -192,7 +206,6 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -212,18 +225,14 @@ class LibvirtVolumeTestCase(test.TestCase):
def test_libvirt_iscsi_driver_still_in_use(self):
# NOTE(vish) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)]
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -231,7 +240,6 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -241,12 +249,19 @@ class LibvirtVolumeTestCase(test.TestCase):
'-n', 'node.startup', '-v', 'automatic')]
self.assertEqual(self.executes, expected_commands)
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
def test_libvirt_sheepdog_driver(self):
- vol_driver = volume_driver.SheepdogDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.sheepdog_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -254,31 +269,39 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), name)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.rbd_secret_uuid is not None,
+ 'auth_username': CONF.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.rbd_secret_uuid,
+ }
+ }
def test_libvirt_rbd_driver(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./source/auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_enabled(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -292,20 +315,18 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -324,20 +345,18 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_disabled(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -351,18 +370,16 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -383,34 +400,12 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
-
- def test_libvirt_lxc_volume(self):
- self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- location = '10.0.2.15:3260'
- name = 'volume-00000001'
- iqn = 'iqn.2010-10.org.openstack:%s' % name
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./source').get('dev'), dev_str)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_nfs_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
@@ -448,11 +443,11 @@ class CacheConcurrencyTestCase(test.TestCase):
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
- self.lock_path = os.path.join(FLAGS.instances_path, 'locks')
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
- basedir = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if fname == basedir or fname == self.lock_path:
return True
return False
@@ -466,11 +461,11 @@ class CacheConcurrencyTestCase(test.TestCase):
self.stubs.Set(os.path, 'exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
- imagebackend.libvirt_utils = fake_libvirt_utils
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
def tearDown(self):
- imagebackend.libvirt_utils = libvirt_utils
-
# Make sure the lock_path for this test is cleaned up
if os.path.exists(self.lock_path):
shutil.rmtree(self.lock_path)
@@ -478,16 +473,27 @@ class CacheConcurrencyTestCase(test.TestCase):
super(CacheConcurrencyTestCase, self).tearDown()
def test_same_fname_concurrency(self):
- """Ensures that the same fname cache runs at a sequentially"""
+ # Ensures that the same fname cache runs sequentially.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
- _concurrency, 'fname', None, wait=wait1, done=done1)
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
- _concurrency, 'fname', None, wait=wait2, done=done2)
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig2, wait=wait2, done=done2)
+
wait2.send()
eventlet.sleep(0)
try:
@@ -503,16 +509,30 @@ class CacheConcurrencyTestCase(test.TestCase):
thr2.wait()
def test_different_fname_concurrency(self):
- """Ensures that two different fname caches are concurrent"""
+ # Ensures that two different fname caches are concurrent.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image('instance', 'name').cache,
- _concurrency, 'fname2', None, wait=wait1, done=done1)
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
+ _concurrency, 'fname2', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image('instance', 'name').cache,
- _concurrency, 'fname1', None, wait=wait2, done=done2)
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ 'name').cache,
+ _concurrency, 'fname1', None,
+ signal=sig2, wait=wait2, done=done2)
+ eventlet.sleep(0)
+ # Wait for thread 2 to start.
+ sig2.wait()
+
wait2.send()
eventlet.sleep(0)
try:
@@ -551,8 +571,12 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(instances_path='')
self.flags(libvirt_snapshots_directory='')
self.call_libvirt_dependant_setup = False
- libvirt_driver.libvirt_utils = fake_libvirt_utils
- snapshots.libvirt_utils = fake_libvirt_utils
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.snapshots.libvirt_utils',
+ fake_libvirt_utils))
def fake_extend(image, size):
pass
@@ -560,23 +584,23 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
nova.tests.image.fake.stub_out_image_service(self.stubs)
+ self.test_instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': '5'} # m1.small
def tearDown(self):
- libvirt_driver.libvirt_utils = libvirt_utils
nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
- test_instance = {'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'root_gb': 10,
- 'ephemeral_gb': 20,
- 'instance_type_id': '5'} # m1.small
-
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
@@ -608,8 +632,7 @@ class LibvirtConnTestCase(test.TestCase):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': 'zone'}
+ 'report_count': 0}
return db.service_create(context.get_admin_context(), service_ref)
@@ -630,7 +653,7 @@ class LibvirtConnTestCase(test.TestCase):
'id': 'fake'
}
result = conn.get_volume_connector(volume)
- self.assertDictMatch(expected, result)
+ self.assertThat(expected, matchers.DictMatches(result))
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -640,6 +663,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(cfg.acpi, True)
+ self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
@@ -647,28 +671,28 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 7)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
- config.LibvirtConfigGuestInput)
+ vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[6]),
- config.LibvirtConfigGuestGraphics)
+ vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.clock),
- config.LibvirtConfigGuestClock)
+ vconfig.LibvirtConfigGuestClock)
self.assertEquals(cfg.clock.offset, "utc")
self.assertEquals(len(cfg.clock.timers), 2)
self.assertEquals(type(cfg.clock.timers[0]),
- config.LibvirtConfigGuestTimer)
+ vconfig.LibvirtConfigGuestTimer)
self.assertEquals(type(cfg.clock.timers[1]),
- config.LibvirtConfigGuestTimer)
+ vconfig.LibvirtConfigGuestTimer)
self.assertEquals(cfg.clock.timers[0].name, "pit")
self.assertEquals(cfg.clock.timers[0].tickpolicy,
"delay")
@@ -691,21 +715,21 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[4]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[6]),
- config.LibvirtConfigGuestInput)
+ vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[7]),
- config.LibvirtConfigGuestGraphics)
+ vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
@@ -722,11 +746,11 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, 'dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestConsole)
+ vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -739,10 +763,10 @@ class LibvirtConnTestCase(test.TestCase):
cfg = conn.get_guest_config(instance_ref, [], None, None, info)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_cpu_config_none(self):
@@ -772,7 +796,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
@@ -815,7 +839,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
self.assertEquals(conf.cpu.model, None)
@@ -834,7 +858,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
@@ -854,7 +878,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
self.assertEquals(conf.cpu.model, "Penryn")
@@ -880,12 +904,15 @@ class LibvirtConnTestCase(test.TestCase):
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
- cpu = config.LibvirtConfigGuestCPU()
+ cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
- caps = config.LibvirtConfigCaps()
- caps.host = config.LibvirtConfigCapsHost()
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
+ cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
@@ -903,10 +930,13 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
+ self.assertEquals(len(conf.cpu.features), 2)
+ self.assertEquals(conf.cpu.features[0].name, "tm2")
+ self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub(self):
@@ -924,7 +954,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Penryn")
@@ -990,8 +1020,7 @@ class LibvirtConnTestCase(test.TestCase):
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_uuid(instance_data)
+ self._check_xml_and_uuid({"disk_format": "raw"})
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
@@ -1037,6 +1066,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -1044,6 +1074,19 @@ class LibvirtConnTestCase(test.TestCase):
# Only one should be listed, since domain with ID 0 must be skiped
self.assertEquals(len(instances), 1)
+ def test_list_defined_instances(self):
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0]
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: [1]
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instances = conn.list_instances()
+ # Only one defined domain should be listed
+ self.assertEquals(len(instances), 1)
+
def test_list_instances_when_instance_deleted(self):
def fake_lookup(instance_name):
@@ -1053,6 +1096,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -1167,6 +1211,7 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -1174,6 +1219,16 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1203,15 +1258,28 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1242,15 +1310,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1281,15 +1361,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1321,15 +1413,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./')
@@ -1356,15 +1460,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1392,15 +1508,27 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1430,14 +1558,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1468,14 +1608,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./')
# Start test
@@ -1501,14 +1653,26 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
@@ -1535,9 +1699,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
@@ -1550,8 +1716,8 @@ class LibvirtConnTestCase(test.TestCase):
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
- "fake",
- "/dev/fake")
+ {"name": "fake-instance"},
+ "/dev/fake")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
@@ -1779,6 +1945,43 @@ class LibvirtConnTestCase(test.TestCase):
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
+ xpath = "./sysinfo/system/entry"
+ check = (lambda t: t.findall(xpath)[0].get("name"),
+ "manufacturer")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[0].text,
+ version.vendor_string())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[1].get("name"),
+ "product")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[1].text,
+ version.product_string())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[2].get("name"),
+ "version")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[2].text,
+ version.version_string_with_package())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[3].get("name"),
+ "serial")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[3].text,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[4].get("name"),
+ "uuid")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[4].text,
+ instance['uuid'])
+ check_list.append(check)
+
+ if hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.findall('./devices/serial')[0].get(
'type'), 'file')
check_list.append(check)
@@ -1838,9 +2041,9 @@ class LibvirtConnTestCase(test.TestCase):
# This test is supposed to make sure we don't
# override a specifically set uri
#
- # Deliberately not just assigning this string to FLAGS.libvirt_uri and
+ # Deliberately not just assigning this string to CONF.libvirt_uri and
# checking against that later on. This way we make sure the
- # implementation doesn't fiddle around with the FLAGS.
+ # implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
@@ -1850,7 +2053,7 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
- """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # ensure_filtering_fules_for_instance() finishes with timeout.
# Preparing mocks
def fake_none(self, *args):
return
@@ -1923,11 +2126,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
- self.assertDictMatch(return_value,
- {"filename": "file",
- 'disk_available_mb': 409600,
- "disk_over_commit": False,
- "block_migration": True})
+ self.assertThat({"filename": "file",
+ 'disk_available_mb': 409600,
+ "disk_over_commit": False,
+ "block_migration": True},
+ matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1949,11 +2152,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
- self.assertDictMatch(return_value,
- {"filename": "file",
- "block_migration": False,
- "disk_over_commit": False,
- "disk_available_mb": None})
+ self.assertThat({"filename": "file",
+ "block_migration": False,
+ "disk_over_commit": False,
+ "disk_available_mb": None},
+ matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1964,7 +2167,9 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(conn, '_compare_cpu')
- conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo)
+ conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
+ reason='foo')
+ )
self.mox.ReplayAll()
self.assertRaises(exception.InvalidCPUInfo,
@@ -2007,6 +2212,37 @@ class LibvirtConnTestCase(test.TestCase):
conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
+ def test_check_can_live_migrate_source_vol_backed_works_correctly(self):
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dest_check_data = {"filename": "file",
+ "block_migration": False,
+ "disk_over_commit": False,
+ "disk_available_mb": 1024,
+ "is_volume_backed": True}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
+ conn._check_shared_storage_test_file("file").AndReturn(False)
+ self.mox.ReplayAll()
+ ret = conn.check_can_live_migrate_source(self.context, instance_ref,
+ dest_check_data)
+ self.assertTrue(type(ret) == dict)
+ self.assertTrue('is_shared_storage' in ret)
+
+ def test_check_can_live_migrate_source_vol_backed_fails(self):
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dest_check_data = {"filename": "file",
+ "block_migration": False,
+ "disk_over_commit": False,
+ "disk_available_mb": 1024,
+ "is_volume_backed": False}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
+ conn._check_shared_storage_test_file("file").AndReturn(False)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidSharedStorage,
+ conn.check_can_live_migrate_source, self.context,
+ instance_ref, dest_check_data)
+
def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
@@ -2062,33 +2298,27 @@ class LibvirtConnTestCase(test.TestCase):
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
- """Confirms recover method is called when exceptions are raised."""
+ # Confirms recover method is called when exceptions are raised.
# Preparing data
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['uuid'],
instance_dict)
- vol_dict = {'status': 'migrating', 'size': 1}
- volume_ref = db.volume_create(self.context, vol_dict)
- db.volume_attached(self.context,
- volume_ref['id'],
- instance_ref['uuid'],
- '/dev/fake')
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
- _bandwidth = FLAGS.live_migration_bandwidth
- vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ _bandwidth = CONF.live_migration_bandwidth
+ vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(libvirt.libvirtError('ERR'))
def fake_lookup(instance_name):
- if instance_name == instance_ref.name:
+ if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
@@ -2107,10 +2337,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
- volume_ref = db.volume_get(self.context, volume_ref['id'])
- self.assertTrue(volume_ref['status'] == 'in-use')
- db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['uuid'])
def test_pre_live_migration_works_correctly_mocked(self):
@@ -2144,6 +2371,42 @@ class LibvirtConnTestCase(test.TestCase):
result = conn.pre_live_migration(c, inst_ref, vol, nw_info)
self.assertEqual(result, None)
+ def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
+ # Creating testdata, using temp dir.
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ class FakeNetworkInfo():
+ def fixed_ips(self):
+ return ["test_ip_addr"]
+ inst_ref = db.instance_create(self.context, self.test_instance)
+ c = context.get_admin_context()
+ nw_info = FakeNetworkInfo()
+ # Creating mocks
+ self.mox.StubOutWithMock(conn, "volume_driver_method")
+ for v in vol['block_device_mapping']:
+ conn.volume_driver_method('connect_volume',
+ v['connection_info'],
+ v['mount_device'].
+ rpartition("/")[2])
+ self.mox.StubOutWithMock(conn, 'plug_vifs')
+ conn.plug_vifs(mox.IsA(inst_ref), nw_info)
+ self.mox.ReplayAll()
+ migrate_data = {'is_shared_storage': False,
+ 'is_volume_backed': True,
+ 'block_migration': False
+ }
+ ret = conn.pre_live_migration(c, inst_ref, vol, nw_info,
+ migrate_data)
+ self.assertEqual(ret, None)
+ self.assertTrue(os.path.exists('%s/%s/' %
+ (tmpdir, inst_ref.name)))
+ db.instance_destroy(self.context, inst_ref['uuid'])
+
def test_pre_block_migration_works_correctly(self):
# Replace instances_path since this testcase creates tmpfile
with utils.tempdir() as tmpdir:
@@ -2166,7 +2429,7 @@ class LibvirtConnTestCase(test.TestCase):
imagebackend.Image.cache(context=mox.IgnoreArg(),
fetch_func=mox.IgnoreArg(),
filename='otherdisk',
- image_id=123456,
+ image_id=self.test_instance['image_ref'],
project_id='fake',
size=10737418240L,
user_id=None).AndReturn(None)
@@ -2177,7 +2440,7 @@ class LibvirtConnTestCase(test.TestCase):
dummyjson)
self.assertTrue(os.path.exists('%s/%s/' %
- (tmpdir, instance_ref.name)))
+ (tmpdir, instance_ref['name'])))
db.instance_destroy(self.context, instance_ref['uuid'])
@@ -2200,7 +2463,7 @@ class LibvirtConnTestCase(test.TestCase):
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
- if instance_name == instance_ref.name:
+ if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
@@ -2220,13 +2483,16 @@ class LibvirtConnTestCase(test.TestCase):
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- info = conn.get_instance_disk_info(instance_ref.name)
+ info = conn.get_instance_disk_info(instance_ref['name'])
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
self.assertEquals(info[0]['path'], '/test/disk')
@@ -2296,16 +2562,49 @@ class LibvirtConnTestCase(test.TestCase):
conn.spawn(self.context, instance, None, [], 'herp',
network_info=network_info)
- path = os.path.join(FLAGS.instances_path, instance.name)
+ path = os.path.join(CONF.instances_path, instance['name'])
if os.path.isdir(path):
shutil.rmtree(path)
- path = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ path = os.path.join(CONF.instances_path, CONF.base_dir_name)
if os.path.isdir(path):
- shutil.rmtree(os.path.join(FLAGS.instances_path,
- FLAGS.base_dir_name))
+ shutil.rmtree(os.path.join(CONF.instances_path,
+ CONF.base_dir_name))
+
+ def test_spawn_without_image_meta(self):
+ self.create_image_called = False
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_create_image(*args, **kwargs):
+ self.create_image_called = True
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = db.instance_create(self.context, instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'to_xml', fake_none)
+ self.stubs.Set(conn, '_create_image', fake_create_image)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ conn.spawn(self.context, instance, None, [], None)
+ self.assertFalse(self.create_image_called)
+
+ conn.spawn(self.context,
+ instance,
+ {'id': instance['image_ref']},
+ [],
+ None)
+ self.assertTrue(self.create_image_called)
def test_get_console_output_file(self):
+ fake_libvirt_utils.files['console.log'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
@@ -2315,11 +2614,7 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
- os.mkdir(console_dir)
console_log = '%s/console.log' % (console_dir)
- f = open(console_log, "w")
- f.write("foo")
- f.close()
fake_dom_xml = """
<domain type='kvm'>
<devices>
@@ -2339,13 +2634,20 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
- libvirt_driver.libvirt_utils = fake_libvirt_utils
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- output = conn.get_console_output(instance)
- self.assertEquals("foo", output)
+
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ self.assertEquals('67890', output)
def test_get_console_output_pty(self):
+ fake_libvirt_utils.files['pty'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
@@ -2355,11 +2657,7 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
- os.mkdir(console_dir)
pty_file = '%s/fake_pty' % (console_dir)
- f = open(pty_file, "w")
- f.write("foo")
- f.close()
fake_dom_xml = """
<domain type='kvm'>
<devices>
@@ -2378,22 +2676,31 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
- with open(fake_pty, 'r') as fp:
- return fp.read()
+ return 'foo'
+
+ def _fake_append_to_file(self, data, fpath):
+ return 'pty'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
- libvirt_driver.libvirt_utils = fake_libvirt_utils
+ libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- output = conn.get_console_output(instance)
- self.assertEquals("foo", output)
+
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ self.assertEquals('67890', output)
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
- self.assertEquals(ip, FLAGS.my_ip)
+ self.assertEquals(ip, CONF.my_ip)
def test_broken_connection(self):
for (error, domain) in (
@@ -2457,7 +2764,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_immediate_delete(self):
def fake_lookup_by_name(instance_name):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2465,6 +2772,74 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
+ def test_destroy_removes_disk(self):
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+ self.mox.StubOutWithMock(shutil, "rmtree")
+ shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
+ libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ conn.destroy(instance, [])
+
+ def test_destroy_not_removes_disk(self):
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ conn.destroy(instance, [], None, False)
+
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.destroy()
@@ -2560,7 +2935,7 @@ class LibvirtConnTestCase(test.TestCase):
return mock
def fake_get_info(instance_name):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
@@ -2571,7 +2946,7 @@ class LibvirtConnTestCase(test.TestCase):
conn._destroy(instance)
def test_available_least_handles_missing(self):
- """Ensure destroy calls managedSaveRemove for saved instance"""
+ # Ensure destroy calls managedSaveRemove for saved instance.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
@@ -2579,18 +2954,18 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(conn, 'list_instances', list_instances)
def get_info(instance_name):
- raise exception.InstanceNotFound()
+ raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
result = conn.get_disk_available_least()
- space = fake_libvirt_utils.get_fs_info(FLAGS.instances_path)['free']
+ space = fake_libvirt_utils.get_fs_info(CONF.instances_path)['free']
self.assertEqual(result, space / 1024 ** 3)
def test_cpu_info(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
- cpu = config.LibvirtConfigCPU()
+ cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = "x86_64"
@@ -2599,20 +2974,20 @@ class LibvirtConnTestCase(test.TestCase):
cpu.threads = 1
cpu.sockets = 4
- cpu.add_feature(config.LibvirtConfigCPUFeature("extapic"))
- cpu.add_feature(config.LibvirtConfigCPUFeature("3dnow"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
- caps = config.LibvirtConfigCaps()
- caps.host = config.LibvirtConfigCapsHost()
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
guest.domtype = ["kvm"]
caps.guests.append(guest)
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
guest.domtype = ["kvm"]
@@ -3025,15 +3400,15 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
- caps = config.LibvirtConfigCaps()
+ caps = vconfig.LibvirtConfigCaps()
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
@@ -3062,7 +3437,7 @@ class HostStateTestCase(test.TestCase):
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
- """Fake connection object"""
+ """Fake connection object."""
def get_vcpu_total(self):
return 1
@@ -3171,6 +3546,7 @@ class IptablesFirewallTestCase(test.TestCase):
pass
self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
+ fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
in_nat_rules = [
@@ -3182,6 +3558,20 @@ class IptablesFirewallTestCase(test.TestCase):
':POSTROUTING ACCEPT [5063:386098]',
]
+ in_mangle_rules = [
+ '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
+ '*mangle',
+ ':PREROUTING ACCEPT [241:39722]',
+ ':INPUT ACCEPT [230:39282]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [266:26558]',
+ ':POSTROUTING ACCEPT [267:26590]',
+ '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill',
+ 'COMMIT',
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ ]
+
in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
@@ -3283,6 +3673,8 @@ class IptablesFirewallTestCase(test.TestCase):
return '\n'.join(self.in_filter_rules), None
if cmd == ('iptables-save', '-c', '-t', 'nat'):
return '\n'.join(self.in_nat_rules), None
+ if cmd == ('iptables-save', '-c', '-t', 'mangle'):
+ return '\n'.join(self.in_mangle_rules), None
if cmd == ('iptables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
@@ -3512,7 +3904,7 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
- self.fw = firewall.NWFilterFirewall(
+ self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
lambda: self.fake_libvirt_connection)
def test_cidr_rule_nwfilter_xml(self):
@@ -3557,7 +3949,7 @@ class NWFilterTestCase(test.TestCase):
'instance_type_id': 1})
def _create_instance_type(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -3622,7 +4014,7 @@ class NWFilterTestCase(test.TestCase):
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
- self.security_group.id)
+ self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
@@ -3639,7 +4031,7 @@ class NWFilterTestCase(test.TestCase):
break
_ensure_all_called(mac, allow_dhcp)
db.instance_remove_security_group(self.context, inst_uuid,
- self.security_group.id)
+ self.security_group['id'])
self.teardown_security_group()
db.instance_destroy(context.get_admin_context(), instance_ref['uuid'])
@@ -3657,7 +4049,7 @@ class NWFilterTestCase(test.TestCase):
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
- self.security_group.id)
+ self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
@@ -3694,8 +4086,10 @@ class LibvirtUtilsTestCase(test.TestCase):
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
def test_create_cow_image(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
rval = ('', '')
+ os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', '/some/path').AndReturn(rval)
utils.execute('qemu-img', 'create', '-f', 'qcow2',
@@ -3708,7 +4102,7 @@ class LibvirtUtilsTestCase(test.TestCase):
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
- 'xen': ([True, 'phy'], [False, 'tap'], [None, 'tap']),
+ 'xen': ([True, 'phy'], [False, 'file'], [None, 'file']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
@@ -3719,7 +4113,9 @@ class LibvirtUtilsTestCase(test.TestCase):
self.assertEquals(result, expected_result)
def test_get_disk_size(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/some/path').AndReturn(('''image: 00000001
file format: raw
@@ -3878,7 +4274,11 @@ disk size: 4.4M''', ''))
"backing file: /foo/bar/baz\n"
"...: ...\n"), ''
+ def return_true(*args, **kwargs):
+ return True
+
self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, 'exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
@@ -3895,7 +4295,7 @@ class LibvirtDriverTestCase(test.TestCase):
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -3991,12 +4391,12 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
ins_ref = self._create_instance()
- """ dest is different host case """
+ # dest is different host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.2', None, None)
self.assertEquals(out, disk_info_text)
- """ dest is same host case """
+ # dest is same host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.1', None, None)
self.assertEquals(out, disk_info_text)
@@ -4013,19 +4413,19 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
- """ instance not found case """
+ # instance not found case
self.assertRaises(exception.NotFound,
self.libvirtconnection._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
- """ instance is running case """
+ # instance is running case
self.assertRaises(utils.LoopingCallDone,
self.libvirtconnection._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
- """ else case """
+ # else case
self.libvirtconnection._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
@@ -4057,7 +4457,7 @@ class LibvirtDriverTestCase(test.TestCase):
block_device_info=None):
pass
- def fake_create_domain(xml):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
@@ -4103,7 +4503,7 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_plug_vifs(instance, network_info):
pass
- def fake_create_domain(xml):
+ def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
@@ -4152,7 +4552,7 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
- ins_ref = self._create_instance({'host': FLAGS.host})
+ ins_ref = self._create_instance({'host': CONF.host})
def fake_os_path_exists(path):
return True
@@ -4168,7 +4568,7 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
- host = 'not' + FLAGS.host
+ host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
def fake_os_path_exists(path):
@@ -4200,15 +4600,65 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
+class LibvirtVolumeUsageTestCase(test.TestCase):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
+ .get_all_volume_usage"""
+
+ def setUp(self):
+ super(LibvirtVolumeUsageTestCase, self).setUp()
+ self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.c = context.get_admin_context()
+
+ # creating instance
+ inst = {}
+ inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ self.ins_ref = db.instance_create(self.c, inst)
+
+ # verify bootable volume device path also
+ self.bdms = [{'volume_id': 1,
+ 'device_name': '/dev/vde'},
+ {'volume_id': 2,
+ 'device_name': 'vda'}]
+
+ def test_get_all_volume_usage(self):
+ def fake_block_stats(instance_name, disk):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+
+ expected_usage = [{'volume': 1,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L},
+ {'volume': 2,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L}]
+ self.assertEqual(vol_usage, expected_usage)
+
+ def test_get_all_volume_usage_device_not_found(self):
+ def fake_lookup(instance_name):
+ raise libvirt.libvirtError('invalid path')
+
+ self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+ self.assertEqual(vol_usage, [])
+
+
class LibvirtNonblockingTestCase(test.TestCase):
- """Test libvirt_nonblocking option"""
+ """Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
- """Test bug 962840"""
+ # Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
diff --git a/nova/tests/test_libvirt_config.py b/nova/tests/test_libvirt_config.py
index a00d5b572..5eafba841 100644
--- a/nova/tests/test_libvirt_config.py
+++ b/nova/tests/test_libvirt_config.py
@@ -15,18 +15,15 @@
# under the License.
from lxml import etree
-from lxml import objectify
from nova import test
-
+from nova.tests import matchers
from nova.virt.libvirt import config
class LibvirtConfigBaseTest(test.TestCase):
def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
- expected = etree.tostring(objectify.fromstring(expectedXmlstr))
- actual = etree.tostring(objectify.fromstring(actualXmlstr))
- self.assertEqual(expected, actual)
+ self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
class LibvirtConfigTest(LibvirtConfigBaseTest):
@@ -65,6 +62,7 @@ class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
xmlin = """
<capabilities>
<host>
+ <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
<cpu>
<arch>x86_64</arch>
<model>Opteron_G3</model>
@@ -88,6 +86,7 @@ class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
obj.parse_str(xmlin)
self.assertEqual(type(obj.host), config.LibvirtConfigCapsHost)
+ self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
xmlout = obj.to_xml()
@@ -303,6 +302,85 @@ class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
""")
+class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSMBIOS()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <smbios mode="sysinfo"/>
+ """)
+
+
+class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios"/>
+ """)
+
+ def test_config_bios(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.bios_version = "6.6.6"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ <entry name="version">6.6.6</entry>
+ </bios>
+ </sysinfo>
+ """)
+
+ def test_config_system(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_version = "6.6.6"
+ obj.system_serial = "123456"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="version">6.6.6</entry>
+ <entry name="serial">123456</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+ def test_config_mixed(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+
class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
def test_config_file(self):
@@ -366,7 +444,7 @@ class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="qcow2"/>
- <source protocol="iscsi" name="foo.bar.com"/>
+ <source name="foo.bar.com" protocol="iscsi"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
@@ -387,7 +465,7 @@ class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="raw"/>
- <source protocol="rbd" name="pool/image"/>
+ <source name="pool/image" protocol="rbd"/>
<auth username="foo">
<secret type="ceph"
uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
@@ -468,12 +546,14 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "vnet0"
+ obj.driver_name = "vhost"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="ethernet">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
+ <driver name="vhost"/>
<target dev="vnet0"/>
</interface>""")
@@ -483,6 +563,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.filtername = "clean-traffic"
obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
@@ -492,6 +573,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
+ <target dev="tap12345678"/>
<filterref filter="clean-traffic">
<parameter name="IP" value="192.168.122.1"/>
</filterref>
@@ -503,6 +585,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.vporttype = "openvswitch"
obj.vportparams.append({"key": "instanceid", "value": "foobar"})
@@ -512,6 +595,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
+ <target dev="tap12345678"/>
<virtualport type="openvswitch">
<parameters instanceid="foobar"/>
</virtualport>
@@ -522,6 +606,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.net_type = "direct"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.source_dev = "eth0"
obj.vporttype = "802.1Qbh"
@@ -530,7 +615,8 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<interface type="direct">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
- <source mode="private" dev="eth0"/>
+ <source dev="eth0" mode="private"/>
+ <target dev="tap12345678"/>
<virtualport type="802.1Qbh"/>
</interface>""")
@@ -626,6 +712,8 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
obj.os_loader = '/usr/lib/xen/boot/hvmloader'
obj.os_root = "root=xvda"
obj.os_cmdline = "console=xvc0"
+ obj.acpi = True
+ obj.apic = True
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
@@ -648,6 +736,10 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
<cmdline>console=xvc0</cmdline>
<root>root=xvda</root>
</os>
+ <features>
+ <acpi/>
+ <apic/>
+ </features>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
@@ -665,6 +757,13 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_boot_dev = "hd"
+ obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
+ obj.acpi = True
+ obj.apic = True
+
+ obj.sysinfo = config.LibvirtConfigGuestSysinfo()
+ obj.sysinfo.bios_vendor = "Acme"
+ obj.sysinfo.system_version = "1.0.0"
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
@@ -681,10 +780,23 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
<name>demo</name>
<memory>104857600</memory>
<vcpu>2</vcpu>
+ <sysinfo type='smbios'>
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="version">1.0.0</entry>
+ </system>
+ </sysinfo>
<os>
<type>linux</type>
<boot dev="hd"/>
+ <smbios mode="sysinfo"/>
</os>
+ <features>
+ <acpi/>
+ <apic/>
+ </features>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
diff --git a/nova/tests/test_libvirt_utils.py b/nova/tests/test_libvirt_utils.py
new file mode 100644
index 000000000..60f0682a8
--- /dev/null
+++ b/nova/tests/test_libvirt_utils.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 NTT Data
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import test
+from nova import utils
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class LibvirtUtilsTestCase(test.TestCase):
+ def test_get_disk_type(self):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(utils, 'execute')
+ os.path.exists(path).AndReturn(True)
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((example_output, ''))
+ self.mox.ReplayAll()
+ disk_type = libvirt_utils.get_disk_type(path)
+ self.assertEqual(disk_type, 'raw')
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index ca52f14ed..11ffa020f 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -16,13 +16,13 @@
from lxml import etree
-from nova import flags
+from nova.openstack.common import cfg
from nova import test
from nova import utils
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class LibvirtVifTestCase(test.TestCase):
@@ -38,7 +38,8 @@ class LibvirtVifTestCase(test.TestCase):
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
- 'dns1': '8.8.8.8'
+ 'dns1': '8.8.8.8',
+ 'id': 'network-id-xxx-yyy-zzz'
}
mapping = {
@@ -46,7 +47,8 @@ class LibvirtVifTestCase(test.TestCase):
'gateway_v6': net['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
- 'vif_uuid': 'vif-xxx-yyy-zzz'
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz'
}
instance = {
@@ -66,17 +68,127 @@ class LibvirtVifTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
def _get_instance_xml(self, driver):
- conf = config.LibvirtConfigGuest()
+ conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
- nic = driver.plug(self.instance, (self.net, self.mapping))
+ nic = driver.get_config(self.instance, self.net, self.mapping)
conf.add_device(nic)
return conf.to_xml()
+ def test_multiple_nics(self):
+ conf = vconfig.LibvirtConfigGuest()
+ conf.virt_type = "qemu"
+ conf.name = "fake-name"
+ conf.uuid = "fake-uuid"
+ conf.memory = 100 * 1024
+ conf.vcpus = 4
+
+ # Tests multiple nic configuration and that target_dev is
+ # set for each
+ nics = [{'net_type': 'bridge',
+ 'mac_addr': '00:00:00:00:00:0b',
+ 'source_dev': 'b_source_dev',
+ 'target_dev': 'b_target_dev'},
+ {'net_type': 'ethernet',
+ 'mac_addr': '00:00:00:00:00:0e',
+ 'source_dev': 'e_source_dev',
+ 'target_dev': 'e_target_dev'},
+ {'net_type': 'direct',
+ 'mac_addr': '00:00:00:00:00:0d',
+ 'source_dev': 'd_source_dev',
+ 'target_dev': 'd_target_dev'}]
+
+ for nic in nics:
+ nic_conf = vconfig.LibvirtConfigGuestInterface()
+ nic_conf.net_type = nic['net_type']
+ nic_conf.target_dev = nic['target_dev']
+ nic_conf.mac_addr = nic['mac_addr']
+ nic_conf.source_dev = nic['source_dev']
+ conf.add_device(nic_conf)
+
+ xml = conf.to_xml()
+ doc = etree.fromstring(xml)
+ for nic in nics:
+ path = "./devices/interface/[@type='%s']" % nic['net_type']
+ node = doc.find(path)
+ self.assertEqual(nic['net_type'], node.get("type"))
+ self.assertEqual(nic['mac_addr'],
+ node.find("mac").get("address"))
+ self.assertEqual(nic['target_dev'],
+ node.find("target").get("dev"))
+
+ def test_model_novirtio(self):
+ self.flags(libvirt_use_virtio_for_bridges=False,
+ libvirt_type='kvm')
+
+ d = vif.LibvirtBridgeDriver()
+ xml = self._get_instance_xml(d)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+
+ ret = node.findall("model")
+ self.assertEqual(len(ret), 0)
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+
+ def test_model_kvm(self):
+ self.flags(libvirt_use_virtio_for_bridges=True,
+ libvirt_type='kvm')
+
+ d = vif.LibvirtBridgeDriver()
+ xml = self._get_instance_xml(d)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+
+ model = node.find("model").get("type")
+ self.assertEqual(model, "virtio")
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+
+ def test_model_qemu(self):
+ self.flags(libvirt_use_virtio_for_bridges=True,
+ libvirt_type='qemu')
+
+ d = vif.LibvirtBridgeDriver()
+ xml = self._get_instance_xml(d)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+
+ model = node.find("model").get("type")
+ self.assertEqual(model, "virtio")
+ driver = node.find("driver").get("name")
+ self.assertEqual(driver, "qemu")
+
+ def test_model_xen(self):
+ self.flags(libvirt_use_virtio_for_bridges=True,
+ libvirt_type='xen')
+
+ d = vif.LibvirtBridgeDriver()
+ xml = self._get_instance_xml(d)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+
+ ret = node.findall("model")
+ self.assertEqual(len(ret), 0)
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+
def test_bridge_driver(self):
d = vif.LibvirtBridgeDriver()
xml = self._get_instance_xml(d)
@@ -91,8 +203,6 @@ class LibvirtVifTestCase(test.TestCase):
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
- d.unplug(None, (self.net, self.mapping))
-
def test_ovs_ethernet_driver(self):
d = vif.LibvirtOpenVswitchDriver()
xml = self._get_instance_xml(d)
@@ -109,8 +219,6 @@ class LibvirtVifTestCase(test.TestCase):
script = node.find("script").get("path")
self.assertEquals(script, "")
- d.unplug(None, (self.net, self.mapping))
-
def test_ovs_virtualport_driver(self):
d = vif.LibvirtOpenVswitchVirtualPortDriver()
xml = self._get_instance_xml(d)
@@ -122,7 +230,7 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, FLAGS.libvirt_ovs_bridge)
+ self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
vp = node.find("virtualport")
@@ -135,7 +243,6 @@ class LibvirtVifTestCase(test.TestCase):
iface_id_found = True
self.assertTrue(iface_id_found)
- d.unplug(None, (self.net, self.mapping))
def test_quantum_bridge_ethernet_driver(self):
d = vif.QuantumLinuxBridgeVIFDriver()
@@ -145,15 +252,13 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
- self.assertEqual(node.get("type"), "ethernet")
+ self.assertEqual(node.get("type"), "bridge")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith("tap"))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
- script = node.find("script").get("path")
- self.assertEquals(script, "")
-
- d.unplug(None, (self.net, self.mapping))
+ br_name = node.find("source").get("bridge")
+ self.assertEqual(br_name, "br0")
def test_quantum_hybrid_driver(self):
d = vif.LibvirtHybridOVSBridgeDriver()
@@ -168,5 +273,3 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(br_name, self.net['bridge'])
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
-
- d.unplug(None, (self.net, self.mapping))
diff --git a/nova/tests/test_loadables.py b/nova/tests/test_loadables.py
new file mode 100644
index 000000000..6d16b9fa8
--- /dev/null
+++ b/nova/tests/test_loadables.py
@@ -0,0 +1,113 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Loadable class handling.
+"""
+
+from nova import exception
+from nova import test
+from nova.tests import fake_loadables
+
+
+class LoadablesTestCase(test.TestCase):
+ def setUp(self):
+ super(LoadablesTestCase, self).setUp()
+ self.fake_loader = fake_loadables.FakeLoader()
+ # The name that we imported above for testing
+ self.test_package = 'nova.tests.fake_loadables'
+
+ def test_loader_init(self):
+ self.assertEqual(self.fake_loader.package, self.test_package)
+ # Test the path of the module
+ ending_path = '/' + self.test_package.replace('.', '/')
+ self.assertTrue(self.fake_loader.path.endswith(ending_path))
+ self.assertEqual(self.fake_loader.loadable_cls_type,
+ fake_loadables.FakeLoadable)
+
+ def _compare_classes(self, classes, expected):
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(set(class_names), set(expected))
+
+ def test_get_all_classes(self):
+ classes = self.fake_loader.get_all_classes()
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_underscore(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2._FakeLoadableSubClass7']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type1(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass4',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type2(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass8']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_one_function(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_two_functions(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_function_including_invalids(self):
+ # When using a method, no checking is done on valid classes.
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_invalid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ '_FakeLoadableSubClass3',
+ 'FakeLoadableSubClass4',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
diff --git a/nova/tests/test_matchers.py b/nova/tests/test_matchers.py
new file mode 100644
index 000000000..be058aa7d
--- /dev/null
+++ b/nova/tests/test_matchers.py
@@ -0,0 +1,351 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+from testtools.tests.matchers import helpers
+
+from nova.tests import matchers
+
+
+class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictMatches(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ str_examples = [
+ ("DictMatches({'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Keys in d1 and not d2: set(['foo', 'baz', 'cat'])."
+ " Keys in d2 and not d1: set([])", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
+
+
+class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictListMatches(
+ [{'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'},
+ ])
+
+ matches_matches = [
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ matches_mismatches = [
+ [],
+ {},
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'cat': 'yorkie'}],
+ [{'foo': 'bop', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ str_examples = [
+ ("DictListMatches([{'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
+ " {'dog': 'yorkie'}])",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: True d2: False",
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ matches_matcher),
+ ]
+
+
+class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.IsSubDictOf(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux'}
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
+ ]
+
+ str_examples = [
+ ("IsSubDictOf({'foo': 'bar', 'baz': 'DONTCARE',"
+ " 'cat': {'fluffy': False, 'tabby': True}})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
+
+
+class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="DONTCARE"/>
+ <children>
+ <!--This is a comment-->
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>DONTCARE</child3>
+ <?spam processing instruction?>
+ </children>
+</root>""")
+
+ matches_matches = ["""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key2="spam" key1="spam"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children><child1>child 1</child1>
+<child2>child 2</child2>
+<child3>blah</child3>
+ </children>
+</root>""",
+ ]
+
+ matches_mismatches = ["""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>mismatch text</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key3="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="quux" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child4>child 4</child4>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ <child4>child 4</child4>
+ </children>
+</root>""",
+ ]
+
+ str_examples = [
+ ("XMLMatches('<?xml version=\"1.0\"?>\\n"
+ "<root>\\n"
+ " <text>some text here</text>\\n"
+ " <text>some other text here</text>\\n"
+ " <attrs key1=\"spam\" key2=\"DONTCARE\"/>\\n"
+ " <children>\\n"
+ " <!--This is a comment-->\\n"
+ " <child1>child 1</child1>\\n"
+ " <child2>child 2</child2>\\n"
+ " <child3>DONTCARE</child3>\\n"
+ " <?spam processing instruction?>\\n"
+ " </children>\\n"
+ "</root>')", matches_matcher),
+ ]
+
+ describe_examples = [
+ ("/root/text[1]: XML text value mismatch: expected text value: "
+ "'some other text here'; actual value: 'mismatch text'",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>mismatch text</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/attrs[2]: XML attributes mismatch: keys only in expected: "
+ "key2; keys only in actual: key3",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key3="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/attrs[2]: XML attribute value mismatch: expected value of "
+ "attribute key1: 'spam'; actual value: 'quux'",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="quux" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML tag mismatch at index 1: expected tag "
+ "<child2>; actual tag <child4>",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child4>child 4</child4>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML expected child element <child3> not "
+ "present at index 2",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML unexpected child element <child4> "
+ "present at index 3",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ <child4>child 4</child4>
+ </children>
+</root>""", matches_matcher),
+ ]
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index a8c78d4f5..f15d71633 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -27,16 +27,17 @@ import webob
from nova.api.metadata import base
from nova.api.metadata import handler
+from nova.api.metadata import password
from nova import block_device
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
-from nova import flags
from nova.network import api as network_api
+from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_network
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
USER_DATA_STRING = ("This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
@@ -83,18 +84,23 @@ def fake_InstanceMetadata(stubs, inst_data, address=None,
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
- fake_get_metadata=None, headers=None):
+ fake_get_metadata=None, headers=None,
+ fake_get_metadata_by_instance_id=None):
- def get_metadata(address):
+ def get_metadata_by_remote_address(address):
return mdinst
app = handler.MetadataRequestHandler()
if fake_get_metadata is None:
- fake_get_metadata = get_metadata
+ fake_get_metadata = get_metadata_by_remote_address
if stubs:
- stubs.Set(app, 'get_metadata', fake_get_metadata)
+ stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
+
+ if fake_get_metadata_by_instance_id:
+ stubs.Set(app, 'get_metadata_by_instance_id',
+ fake_get_metadata_by_instance_id)
request = webob.Request.blank(relpath)
request.remote_addr = address
@@ -142,10 +148,10 @@ class MetadataTestCase(test.TestCase):
md = fake_InstanceMetadata(self.stubs, copy(self.instance))
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
- "%s.%s" % (self.instance['hostname'], FLAGS.dhcp_domain))
+ "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
- """Make sure that _format_instance_mappings works"""
+ # Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = {'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
@@ -314,6 +320,14 @@ class OpenStackMetadataTestCase(test.TestCase):
for key, val in extra.iteritems():
self.assertEqual(mddict[key], val)
+ def test_password(self):
+ # make sure the password handler is exposed via the metadata lookup
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack/latest/password")
+ self.assertEqual(result, password.handle_password)
+
def test_userdata(self):
inst = copy(self.instance)
mdinst = fake_InstanceMetadata(self.stubs, inst)
@@ -328,11 +342,34 @@ class OpenStackMetadataTestCase(test.TestCase):
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
- self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10"))
+ self.assertFalse('user_data' in mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
+ def test_random_seed(self):
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # verify that 2013-04-04 has the 'random_seed' field
+ mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
+ mddict = json.loads(mdjson)
+
+ self.assertTrue("random_seed" in mddict)
+ self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
+
+ # verify that older versions do not have it
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ self.assertFalse("random_seed" in json.loads(mdjson))
+
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
@@ -346,6 +383,20 @@ class MetadataHandlerTestCase(test.TestCase):
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
+ def test_callable(self):
+
+ def verify(req, meta_data):
+ self.assertTrue(isinstance(meta_data, CallableMD))
+ return "foo"
+
+ class CallableMD(object):
+ def lookup(self, path_info):
+ return verify
+
+ response = fake_request(self.stubs, CallableMD(), "/bar")
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, "foo")
+
def test_root(self):
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self.stubs, self.mdinst, "/")
@@ -405,3 +456,107 @@ class MetadataHandlerTestCase(test.TestCase):
fake_get_metadata=fake_get_metadata,
headers=None)
self.assertEqual(response.status_int, 500)
+
+ def test_user_data_with_quantum_instance_id(self):
+ expected_instance_id = 'a-b-c-d'
+
+ def fake_get_metadata(instance_id, remote_address):
+ if instance_id == expected_instance_id:
+ return self.mdinst
+ else:
+ # raise the exception to aid with 500 response code test
+ raise Exception("Expected instance_id of %s, got %s" %
+ (expected_instance_id, instance_id))
+
+ signed = ('d98d0dd53b026a24df2c06b464ffa5da'
+ 'db922ae41af7bd3ecc3cae75aef65771')
+
+ # try a request with service disabled
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': signed})
+ self.assertEqual(response.status_int, 200)
+
+ # now enable the service
+
+ self.flags(service_quantum_metadata_proxy=True)
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body,
+ base64.b64decode(self.instance['user_data']))
+
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': ''})
+
+ self.assertEqual(response.status_int, 403)
+
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Instance-ID': 'z-z-z-z',
+ 'X-Instance-ID-Signature': '81f42e3fc77ba3a3e8d83142746e0'
+ '8387b96cbc5bd2474665192d2ec28'
+ '8ffb67'})
+ self.assertEqual(response.status_int, 500)
+
+
+class MetadataPasswordTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataPasswordTestCase, self).setUp()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
+ self.instance = copy(INSTANCES[0])
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+
+ def test_get_password(self):
+ request = webob.Request.blank('')
+ self.mdinst.password = 'foo'
+ result = password.handle_password(request, self.mdinst)
+ self.assertEqual(result, 'foo')
+
+ def test_bad_method(self):
+ request = webob.Request.blank('')
+ request.method = 'PUT'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ password.handle_password, request, self.mdinst)
+
+ def _try_set_password(self, val='bar'):
+ request = webob.Request.blank('')
+ request.method = 'POST'
+ request.body = val
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ lambda *a, **kw: None)
+ password.handle_password(request, self.mdinst)
+
+ def test_set_password(self):
+ self.mdinst.password = ''
+ self._try_set_password()
+
+ def test_conflict(self):
+ self.mdinst.password = 'foo'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._try_set_password)
+
+ def test_too_large(self):
+ self.mdinst.password = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._try_set_password,
+ 'a' * (password.MAX_SIZE + 1))
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 5ec91ca14..750326592 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -61,7 +61,7 @@ def _is_mysql_avail(user="openstack_citest",
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
- # intential catch all to handle exceptions even if we don't
+ # intentionally catch all to handle exceptions even if we don't
# have mysql code loaded at all.
return False
else:
@@ -77,9 +77,8 @@ def _have_mysql():
class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations"""
+ """Test sqlalchemy-migrate migrations."""
- TEST_DATABASES = {}
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
@@ -94,28 +93,28 @@ class TestMigrations(test.TestCase):
super(TestMigrations, self).setUp()
self.snake_walk = False
+ self.test_databases = {}
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
- if not TestMigrations.TEST_DATABASES:
- if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
- cp = ConfigParser.RawConfigParser()
- try:
- cp.read(TestMigrations.CONFIG_FILE_PATH)
- defaults = cp.defaults()
- for key, value in defaults.items():
- TestMigrations.TEST_DATABASES[key] = value
- self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
- except ConfigParser.ParsingError, e:
- self.fail("Failed to read test_migrations.conf config "
- "file. Got error: %s" % e)
- else:
- self.fail("Failed to find test_migrations.conf config "
- "file.")
+ if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
+ cp = ConfigParser.RawConfigParser()
+ try:
+ cp.read(TestMigrations.CONFIG_FILE_PATH)
+ defaults = cp.defaults()
+ for key, value in defaults.items():
+ self.test_databases[key] = value
+ self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
+ except ConfigParser.ParsingError, e:
+ self.fail("Failed to read test_migrations.conf config "
+ "file. Got error: %s" % e)
+ else:
+ self.fail("Failed to find test_migrations.conf config "
+ "file.")
self.engines = {}
- for key, value in TestMigrations.TEST_DATABASES.items():
+ for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
@@ -131,8 +130,8 @@ class TestMigrations(test.TestCase):
# remove these from the list so they aren't used in the migration tests
if "mysqlcitest" in self.engines:
del self.engines["mysqlcitest"]
- if "mysqlcitest" in TestMigrations.TEST_DATABASES:
- del TestMigrations.TEST_DATABASES["mysqlcitest"]
+ if "mysqlcitest" in self.test_databases:
+ del self.test_databases["mysqlcitest"]
super(TestMigrations, self).tearDown()
def _reset_databases(self):
@@ -141,7 +140,7 @@ class TestMigrations(test.TestCase):
LOG.debug(output)
self.assertEqual(0, status)
for key, engine in self.engines.items():
- conn_string = TestMigrations.TEST_DATABASES[key]
+ conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
@@ -177,29 +176,23 @@ class TestMigrations(test.TestCase):
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
- if auth_pieces[1].strip():
- password = auth_pieces[1]
- cmd = ("touch ~/.pgpass;"
- "chmod 0600 ~/.pgpass;"
- "sed -i -e"
- "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};"
- "1!d' ~/.pgpass") % locals()
- execute_cmd(cmd)
- sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false "
- "WHERE datname='%(database)s';") % locals()
- cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
- execute_cmd(cmd)
- sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) "
- "FROM pg_catalog.pg_stat_activity "
- "WHERE datname='%(database)s';") % locals()
- cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
- execute_cmd(cmd)
+ password = auth_pieces[1].strip()
+ # note(boris-42): This file is used for authentication
+ # without password prompt.
+ createpgpass = ("echo '*:*:*:%(user)s:%(password)s' > "
+ "~/.pgpass && chmod 0600 ~/.pgpass" % locals())
+ execute_cmd(createpgpass)
+ # note(boris-42): We must create and drop database, we can't
+ # drop database which we have connected to, so for such
+ # operations there is a special database template1.
+ sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
+ " '%(sql)s' -d template1")
sql = ("drop database if exists %(database)s;") % locals()
- cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
- execute_cmd(cmd)
+ droptable = sqlcmd % locals()
+ execute_cmd(droptable)
sql = ("create database %(database)s;") % locals()
- cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
- execute_cmd(cmd)
+ createtable = sqlcmd % locals()
+ execute_cmd(createtable)
def test_walk_versions(self):
"""
@@ -218,17 +211,15 @@ class TestMigrations(test.TestCase):
self.fail("Shouldn't have connected")
def test_mysql_innodb(self):
- """
- Test that table creation on mysql only builds InnoDB tables
- """
+ # Test that table creation on mysql only builds InnoDB tables
if not _have_mysql():
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
- # automaticaly in tearDown so no need to clean it up here.
+ # automatically in tearDown so no need to clean it up here.
connect_string = _mysql_get_connect_string()
engine = sqlalchemy.create_engine(connect_string)
self.engines["mysqlcitest"] = engine
- TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string
+ self.test_databases["mysqlcitest"] = connect_string
# build a fully populated mysql database with all the tables
self._reset_databases()
@@ -274,19 +265,19 @@ class TestMigrations(test.TestCase):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version)
if snake_walk:
- self._migrate_down(engine, version - 1)
+ self._migrate_down(engine, version)
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(
- xrange(migration.INIT_VERSION + 1,
- TestMigrations.REPOSITORY.latest)):
+ xrange(migration.INIT_VERSION + 2,
+ TestMigrations.REPOSITORY.latest + 1)):
# downgrade -> upgrade -> downgrade
self._migrate_down(engine, version)
if snake_walk:
- self._migrate_up(engine, version + 1)
+ self._migrate_up(engine, version)
self._migrate_down(engine, version)
def _migrate_down(self, engine, version):
@@ -305,202 +296,94 @@ class TestMigrations(test.TestCase):
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
- def test_migration_98(self):
- """Test that migration 98 runs
+ def test_migration_146(self):
+ name = 'name'
+ az = 'custom_az'
+
+ def _145_check():
+ agg = aggregates.select(aggregates.c.id == 1).execute().first()
+ self.assertEqual(name, agg.name)
+ self.assertEqual(az, agg.availability_zone)
- This test exists to prove bug 1047633 has been fixed
- """
for key, engine in self.engines.items():
migration_api.version_control(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 97)
-
- # Set up a single volume, values don't matter
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 145)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
- vol_id = '9db3c2e5-8cac-4e94-9e6c-b5f750736727'
- volumes.insert().values(id=vol_id).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 98)
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 97)
+ aggregates = sqlalchemy.Table('aggregates', metadata,
+ autoload=True)
- def test_migration_91(self):
- """Test that migration 91 works correctly.
+ aggregates.insert().values(id=1, availability_zone=az,
+ aggregate_name=1, name=name).execute()
- This test prevents regression of bugs 1052244 and 1052220.
- """
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 90)
+ _145_check()
- vol1_id = '10'
- vol1_uuid = '9db3c2e5-8cac-4e94-9e6c-b5f750736727'
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
- vol2_id = '11'
- vol2_uuid = 'fb17fb5a-ca3d-4bba-8903-fc776ea81d78'
+ aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
+ metadata, autoload=True)
+ metadata = aggregate_metadata.select(aggregate_metadata.c.
+ aggregate_id == 1).execute().first()
+ self.assertEqual(az, metadata['value'])
- snap_id = '7'
- snap_uuid = 'a87e5108-8a2b-4c89-be96-0e8760db2c6a'
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
+ _145_check()
- inst_id = '0ec45d38-aefd-4c42-a209-361e848240b7'
+ def test_migration_147(self):
+ az = 'test_zone'
+ host1 = 'compute-host1'
+ host2 = 'compute-host2'
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
+ def _146_check():
+ service = services.select(services.c.id == 1).execute().first()
+ self.assertEqual(az, service.availability_zone)
+ self.assertEqual(host1, service.host)
+ service = services.select(services.c.id == 2).execute().first()
+ self.assertNotEqual(az, service.availability_zone)
+ service = services.select(services.c.id == 3).execute().first()
+ self.assertEqual(az, service.availability_zone)
+ self.assertEqual(host2, service.host)
- instances = sqlalchemy.Table('instances', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
- sm_flavors = sqlalchemy.Table(
- 'sm_flavors', metadata, autoload=True)
- sm_backend_config = sqlalchemy.Table(
- 'sm_backend_config', metadata, autoload=True)
- sm_volume = sqlalchemy.Table(
- 'sm_volume', metadata, autoload=True)
- volume_mappings = sqlalchemy.Table(
- 'volume_id_mappings', metadata, autoload=True)
- iscsi_targets = sqlalchemy.Table(
- 'iscsi_targets', metadata, autoload=True)
- volume_metadata = sqlalchemy.Table(
- 'volume_metadata', metadata, autoload=True)
- snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
- snapshot_mappings = sqlalchemy.Table(
- 'snapshot_id_mappings', metadata, autoload=True)
- block_device_mapping = sqlalchemy.Table(
- 'block_device_mapping', metadata, autoload=True)
-
- volumes.insert().values(id=vol1_id).execute()
- volume_mappings.insert() \
- .values(id=vol1_id, uuid=vol1_uuid).execute()
- snapshots.insert().values(id=snap_id, volume_id=vol1_id).execute()
- snapshot_mappings.insert() \
- .values(id=snap_id, uuid=snap_uuid).execute()
- volumes.insert().values(id=vol2_id, snapshot_id=snap_id).execute()
- volume_mappings.insert() \
- .values(id=vol2_id, uuid=vol2_uuid).execute()
- sm_flavors.insert().values(id=7).execute()
- sm_backend_config.insert().values(id=7, flavor_id=7).execute()
- sm_volume.insert().values(id=vol1_id, backend_id=7).execute()
- volume_metadata.insert().values(id=7, volume_id=vol1_id).execute()
- iscsi_targets.insert().values(id=7, volume_id=vol1_id).execute()
- instances.insert().values(id=7, uuid=inst_id).execute()
- block_device_mapping.insert()\
- .values(id=7, volume_id=vol1_id, instance_uuid=inst_id) \
- .execute()
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_id, vol2_id]))
- self.assertEqual(snap_id, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_id]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_id]))
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 91)
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_uuid, vol2_uuid]))
- self.assertEqual(snap_uuid, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_uuid]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_uuid]))
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 90)
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_id, vol2_id]))
- self.assertEqual(snap_id, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_id]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_id]))
-
- def test_migration_111(self):
for key, engine in self.engines.items():
migration_api.version_control(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 110)
-
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
- aggregate_hosts = sqlalchemy.Table('aggregate_hosts', metadata,
- autoload=True)
- host = 'host'
- aggregate_hosts.insert().values(id=1,
- aggregate_id=1, host=host).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 111)
- agg = sqlalchemy.select([aggregate_hosts.c.host]).execute().first()
- self.assertEqual(host, agg.host)
- aggregate_hosts.insert().values(id=2,
- aggregate_id=2, host=host).execute()
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 111)
- agg = sqlalchemy.select([aggregate_hosts.c.host]).execute().first()
- self.assertEqual(host, agg.host)
- def test_migration_133(self):
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 132)
-
- # Set up a single volume, values don't matter
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- aggregates = sqlalchemy.Table('aggregates', metadata,
+ # populate the services table
+ services = sqlalchemy.Table('services', metadata,
autoload=True)
- name = 'name'
- aggregates.insert().values(id=1, availability_zone='nova',
- aggregate_name=1, name=name).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 133)
- aggregates.insert().values(id=2, availability_zone='nova',
- aggregate_name=2, name=name).execute()
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 132)
- agg = sqlalchemy.select([aggregates.c.name]).execute().first()
- self.assertEqual(name, agg.name)
+ services.insert().values(id=1, host=host1,
+ binary='nova-compute', topic='compute', report_count=0,
+ availability_zone=az).execute()
+ services.insert().values(id=2, host='sched-host',
+ binary='nova-scheduler', topic='scheduler', report_count=0,
+ availability_zone='ignore_me').execute()
+ services.insert().values(id=3, host=host2,
+ binary='nova-compute', topic='compute', report_count=0,
+ availability_zone=az).execute()
+
+ _146_check()
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147)
+
+ # check aggregate metadata
+ aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
+ metadata, autoload=True)
+ aggregate_hosts = sqlalchemy.Table('aggregate_hosts',
+ metadata, autoload=True)
+ metadata = aggregate_metadata.select(aggregate_metadata.c.
+ aggregate_id == 1).execute().first()
+ self.assertEqual(az, metadata['value'])
+ self.assertEqual(aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 1).execute().
+ first().host, host1)
+ blank = [h for h in aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 2).execute()]
+ self.assertEqual(blank, [])
+
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)
+
+ _146_check()
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 4e989f315..6732c4007 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -14,13 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import errno
import glob
import os
-import select
-
-from eventlet import greenpool
-from eventlet import greenthread
from nova import exception
from nova import test
@@ -32,6 +27,8 @@ class ExceptionTestCase(test.TestCase):
raise exc()
def test_exceptions_raise(self):
+ # NOTE(dprince): disable format errors since we are not passing kwargs
+ self.flags(fatal_exception_format_errors=False)
for name in dir(exception):
exc = getattr(exception, name)
if isinstance(exc, type):
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index 5abe51486..a300028a0 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -24,17 +24,18 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
-from nova import flags
from nova.network import api as network_api
from nova import notifications
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova import test
from nova.tests import fake_network
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class NotificationsTestCase(test.TestCase):
@@ -53,6 +54,8 @@ class NotificationsTestCase(test.TestCase):
fake_get_nw_info)
fake_network.set_stub_network_methods(self.stubs)
+ notifier_api._reset_drivers()
+ self.addCleanup(notifier_api._reset_drivers)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager',
@@ -66,10 +69,6 @@ class NotificationsTestCase(test.TestCase):
self.instance = self._wrapped_create()
- def tearDown(self):
- notifier_api._reset_drivers()
- super(NotificationsTestCase, self).tearDown()
-
def _wrapped_create(self, params=None):
inst = {}
inst['image_ref'] = 1
@@ -82,6 +81,7 @@ class NotificationsTestCase(test.TestCase):
inst['access_ip_v4'] = '1.2.3.4'
inst['access_ip_v6'] = 'feed:5eed'
inst['display_name'] = 'test_instance'
+ inst['hostname'] = 'test_instance_hostname'
if params:
inst.update(params)
return db.instance_create(self.context, inst)
@@ -211,6 +211,7 @@ class NotificationsTestCase(test.TestCase):
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
self.assertEquals(vm_states.BUILDING, payload["old_state"])
self.assertEquals(vm_states.ACTIVE, payload["state"])
@@ -219,6 +220,7 @@ class NotificationsTestCase(test.TestCase):
self.assertEquals(payload["access_ip_v4"], access_ip_v4)
self.assertEquals(payload["access_ip_v6"], access_ip_v6)
self.assertEquals(payload["display_name"], display_name)
+ self.assertEquals(payload["hostname"], hostname)
def test_task_update_with_states(self):
self.flags(notify_on_state_change="vm_and_task_state")
@@ -232,6 +234,7 @@ class NotificationsTestCase(test.TestCase):
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
self.assertEquals(vm_states.BUILDING, payload["old_state"])
self.assertEquals(vm_states.BUILDING, payload["state"])
@@ -240,6 +243,7 @@ class NotificationsTestCase(test.TestCase):
self.assertEquals(payload["access_ip_v4"], access_ip_v4)
self.assertEquals(payload["access_ip_v6"], access_ip_v6)
self.assertEquals(payload["display_name"], display_name)
+ self.assertEquals(payload["hostname"], hostname)
def test_update_no_service_name(self):
notifications.send_update_with_states(self.context, self.instance,
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index ec5d452a5..ff316826a 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -360,13 +360,13 @@ class ProjectCommandsTestCase(test.TestCase):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
- key='volumes',
+ key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
- self.assertEquals(('volumes: unlimited' in result), True)
+ self.assertEquals(('instances: unlimited' in result), True)
def test_quota_update_invalid_key(self):
self.assertRaises(SystemExit,
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index dc615bf5d..1029e0c2c 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -14,6 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ConfigParser
+import logging
+import logging.handlers
import os
import subprocess
@@ -43,58 +46,70 @@ class RootwrapTestCase(test.TestCase):
def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertTrue(filtermatch is None)
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, usercmd)
def test_missing_command(self):
valid_but_missing = ["foo_bar_not_exist"]
invalid = ["foo_bar_not_exist_and_not_matched"]
- filtermatch = wrapper.match_filter(self.filters, valid_but_missing)
- self.assertTrue(filtermatch is not None)
- filtermatch = wrapper.match_filter(self.filters, invalid)
- self.assertTrue(filtermatch is None)
-
- def test_DnsmasqFilter(self):
- usercmd = ['env', 'FLAGFILE=A', 'NETWORK_ID=foobar', 'dnsmasq', 'foo']
- f = filters.DnsmasqFilter("/usr/bin/dnsmasq", "root")
+ self.assertRaises(wrapper.FilterMatchNotExecutable,
+ wrapper.match_filter, self.filters, valid_but_missing)
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, invalid)
+
+ def _test_DnsmasqFilter(self, filter_class, config_file_arg):
+ usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
+ 'dnsmasq', 'foo']
+ f = filter_class("/usr/bin/dnsmasq", "root")
self.assertTrue(f.match(usercmd))
self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
env = f.get_environment(usercmd)
- self.assertEqual(env.get('FLAGFILE'), 'A')
+ self.assertEqual(env.get(config_file_arg), 'A')
self.assertEqual(env.get('NETWORK_ID'), 'foobar')
+ def test_DnsmasqFilter(self):
+ self._test_DnsmasqFilter(filters.DnsmasqFilter, 'CONFIG_FILE')
+
+ def test_DeprecatedDnsmasqFilter(self):
+ self._test_DnsmasqFilter(filters.DeprecatedDnsmasqFilter, 'FLAGFILE')
+
def test_KillFilter(self):
if not os.path.exists("/proc/%d" % os.getpid()):
self.skipTest("Test requires /proc filesystem (procfs)")
p = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
- f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
- f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
- usercmd = ['kill', '-ALRM', p.pid]
- # Incorrect signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- # Providing matching signal should be allowed
- usercmd = ['kill', '-9', p.pid]
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
-
- f = filters.KillFilter("root", "/bin/cat")
- f2 = filters.KillFilter("root", "/usr/bin/cat")
- usercmd = ['kill', os.getpid()]
- # Our own PID does not match /bin/sleep, so it should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', 999999]
- # Nonexistent PID should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should work
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+ try:
+ f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
+ f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
+ usercmd = ['kill', '-ALRM', p.pid]
+ # Incorrect signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ # Providing matching signal should be allowed
+ usercmd = ['kill', '-9', p.pid]
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+
+ f = filters.KillFilter("root", "/bin/cat")
+ f2 = filters.KillFilter("root", "/usr/bin/cat")
+ usercmd = ['kill', os.getpid()]
+ # Our own PID does not match /bin/sleep, so it should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', 999999]
+ # Nonexistent PID should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should work
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+ finally:
+ # Terminate the "cat" process and wait for it to finish
+ p.terminate()
+ p.wait()
def test_KillFilter_no_raise(self):
- """Makes sure ValueError from bug 926412 is gone"""
+ # Makes sure ValueError from bug 926412 is gone.
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
@@ -104,7 +119,7 @@ class RootwrapTestCase(test.TestCase):
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
- """Makes sure deleted exe's are killed correctly"""
+ # Makes sure deleted exe's are killed correctly.
# See bug #967931.
def fake_readlink(blah):
return '/bin/commandddddd (deleted)'
@@ -124,8 +139,60 @@ class RootwrapTestCase(test.TestCase):
self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
self.assertTrue(f.match(usercmd))
+ def test_exec_dirs_search(self):
+ # This test supposes you have /bin/cat or /usr/bin/cat locally
+ f = filters.CommandFilter("cat", "root")
+ usercmd = ['cat', '/f']
+ self.assertTrue(f.match(usercmd))
+ self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin',
+ '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f']))
+
def test_skips(self):
# Check that all filters are skipped and that the last matches
usercmd = ["cat", "/"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertTrue(filtermatch is self.filters[-1])
+
+ def test_RootwrapConfig(self):
+ raw = ConfigParser.RawConfigParser()
+
+ # Empty config should raise ConfigParser.Error
+ self.assertRaises(ConfigParser.Error, wrapper.RootwrapConfig, raw)
+
+ # Check default values
+ raw.set('DEFAULT', 'filters_path', '/a,/b')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.filters_path, ['/a', '/b'])
+ self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':'))
+ self.assertFalse(config.use_syslog)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_SYSLOG)
+ self.assertEqual(config.syslog_log_level, logging.ERROR)
+
+ # Check general values
+ raw.set('DEFAULT', 'exec_dirs', '/a,/x')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.exec_dirs, ['/a', '/x'])
+
+ raw.set('DEFAULT', 'use_syslog', 'oui')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'use_syslog', 'true')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertTrue(config.use_syslog)
+
+ raw.set('DEFAULT', 'syslog_log_facility', 'moo')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'syslog_log_facility', 'local0')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_LOCAL0)
+ raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_facility,
+ logging.handlers.SysLogHandler.LOG_AUTH)
+
+ raw.set('DEFAULT', 'syslog_log_level', 'bar')
+ self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+ raw.set('DEFAULT', 'syslog_log_level', 'INFO')
+ config = wrapper.RootwrapConfig(raw)
+ self.assertEqual(config.syslog_log_level, logging.INFO)
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index 50ce67c80..37d8c5d7d 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -28,13 +28,13 @@ import tempfile
from boto import exception as boto_exception
from boto.s3 import connection as s3
-from nova import flags
from nova.objectstore import s3server
+from nova.openstack.common import cfg
from nova import test
from nova import wsgi
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('s3_host', 'nova.image.s3')
# Create a unique temporary directory. We don't delete after test to
# allow checking the contents after running tests. Users and/or tools
@@ -55,14 +55,14 @@ class S3APITestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
s3_host='127.0.0.1')
- shutil.rmtree(FLAGS.buckets_path)
- os.mkdir(FLAGS.buckets_path)
+ shutil.rmtree(CONF.buckets_path)
+ os.mkdir(CONF.buckets_path)
- router = s3server.S3Application(FLAGS.buckets_path)
+ router = s3server.S3Application(CONF.buckets_path)
self.server = wsgi.Server("S3 Objectstore",
router,
- host=FLAGS.s3_host,
- port=FLAGS.s3_port)
+ host=CONF.s3_host,
+ port=0)
self.server.start()
if not boto.config.has_section('Boto'):
@@ -71,8 +71,8 @@ class S3APITestCase(test.TestCase):
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id='fake',
aws_secret_access_key='fake',
- host=FLAGS.s3_host,
- port=FLAGS.s3_port,
+ host=CONF.s3_host,
+ port=self.server.port,
is_secure=False,
calling_format=s3.OrdinaryCallingFormat())
self.conn = conn
@@ -94,11 +94,11 @@ class S3APITestCase(test.TestCase):
return True
def test_list_buckets(self):
- """Make sure we are starting with no buckets."""
+ # Make sure we are starting with no buckets.
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
+ # Test bucket creation and deletion.
bucket_name = 'testbucket'
self.conn.create_bucket(bucket_name)
@@ -107,7 +107,7 @@ class S3APITestCase(test.TestCase):
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
+ # Test key operations on buckets.
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
new file mode 100644
index 000000000..39669967f
--- /dev/null
+++ b/nova/tests/test_periodic_tasks.py
@@ -0,0 +1,123 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+import time
+
+from nova import manager
+from nova import test
+
+
+class ManagerMetaTestCase(test.TestCase):
+ """Tests for the meta class which manages the creation of periodic tasks.
+ """
+
+ def test_meta(self):
+ class Manager(object):
+ __metaclass__ = manager.ManagerMeta
+
+ @manager.periodic_task
+ def foo(self):
+ return 'foo'
+
+ @manager.periodic_task(spacing=4)
+ def bar(self):
+ return 'bar'
+
+ @manager.periodic_task(enabled=False)
+ def baz(self):
+ return 'baz'
+
+ m = Manager()
+ self.assertEqual(2, len(m._periodic_tasks))
+ self.assertEqual(None, m._periodic_spacing['foo'])
+ self.assertEqual(4, m._periodic_spacing['bar'])
+ self.assertFalse('baz' in m._periodic_spacing)
+
+
+class Manager(test.TestCase):
+ """Tests the periodic tasks portion of the manager class."""
+
+ def test_periodic_tasks_with_idle(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(1, len(m._periodic_tasks))
+ self.assertEqual(200, m._periodic_spacing['bar'])
+
+ # Now a single pass of the periodic tasks
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_periodic_tasks_constant(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=0)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_periodic_tasks_idle_calculation(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=10)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ m.periodic_tasks(None)
+ time.sleep(0.1)
+ idle = m.periodic_tasks(None)
+ self.assertTrue(idle > 9.7)
+ self.assertTrue(idle < 9.9)
+
+ def test_periodic_tasks_disabled(self):
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=-1)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ idle = m.periodic_tasks(None)
+ self.assertAlmostEqual(60, idle, 1)
+
+ def test_external_running_here(self):
+ self.flags(run_external_periodic_tasks=True)
+
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200, external_process_ok=True)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(1, len(m._periodic_tasks))
+
+ def test_external_running_elsewhere(self):
+ self.flags(run_external_periodic_tasks=False)
+
+ class Manager(manager.Manager):
+ @manager.periodic_task(spacing=200, external_process_ok=True)
+ def bar(self):
+ return 'bar'
+
+ m = Manager()
+ self.assertEqual(0, len(m._periodic_tasks))
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index 96e6b08a9..85c2ca2cd 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -16,12 +16,11 @@
from nova.cloudpipe import pipelib
from nova import context
from nova import crypto
-from nova import flags
+from nova.openstack.common import cfg
from nova import test
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class PipelibTest(test.TestCase):
@@ -50,7 +49,7 @@ class PipelibTest(test.TestCase):
self.cloudpipe.launch_vpn_instance(self.context)
def test_setup_security_group(self):
- group_name = "%s%s" % (self.project, FLAGS.vpn_key_suffix)
+ group_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
# First attemp, does not exist (thus its created)
res1_group = self.cloudpipe.setup_security_group(self.context)
@@ -61,7 +60,7 @@ class PipelibTest(test.TestCase):
self.assertEqual(res1_group, res2_group)
def test_setup_key_pair(self):
- key_name = "%s%s" % (self.project, FLAGS.vpn_key_suffix)
+ key_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
with utils.tempdir() as tmpdir:
self.flags(keys_path=tmpdir)
diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py
index a40dd3276..77985854a 100644
--- a/nova/tests/test_plugin_api_extensions.py
+++ b/nova/tests/test_plugin_api_extensions.py
@@ -57,7 +57,7 @@ class MockEntrypoint(pkg_resources.EntryPoint):
class APITestCase(test.TestCase):
- """Test case for the plugin api extension interface"""
+ """Test case for the plugin api extension interface."""
def test_add_extension(self):
def mock_load(_s):
return TestPluginClass()
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index c0c487447..c92e1076e 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Test of Policy Engine For Nova"""
+"""Test of Policy Engine For Nova."""
import os.path
import StringIO
@@ -23,30 +23,22 @@ import urllib2
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova import utils
-FLAGS = flags.FLAGS
-
-
class PolicyFileTestCase(test.TestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
- policy.reset()
self.context = context.RequestContext('fake', 'fake')
self.target = {}
- def tearDown(self):
- super(PolicyFileTestCase, self).tearDown()
- policy.reset()
-
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
+
self.flags(policy_file=tmpfilename)
# NOTE(uni): context construction invokes policy check to determin
@@ -56,10 +48,10 @@ class PolicyFileTestCase(test.TestCase):
action = "example:test"
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": ""}""")
+ policyfile.write('{"example:test": ""}')
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": "!"}""")
+ policyfile.write('{"example:test": "!"}')
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._POLICY_CACHE = {}
@@ -70,9 +62,6 @@ class PolicyFileTestCase(test.TestCase):
class PolicyTestCase(test.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
- policy.reset()
- # NOTE(vish): preload rules to circumvent reloading from file
- policy.init()
rules = {
"true": '@',
"example:allowed": '@',
@@ -85,17 +74,10 @@ class PolicyTestCase(test.TestCase):
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
- # NOTE(vish): then overload underlying brain
- common_policy.set_rules(common_policy.Rules(
- dict((k, common_policy.parse_rule(v))
- for k, v in rules.items())))
+ self.policy.set_rules(rules)
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
- def tearDown(self):
- policy.reset()
- super(PolicyTestCase, self).tearDown()
-
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
@@ -169,8 +151,6 @@ class DefaultPolicyTestCase(test.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
- policy.reset()
- policy.init()
self.rules = {
"default": '',
@@ -187,10 +167,6 @@ class DefaultPolicyTestCase(test.TestCase):
for k, v in self.rules.items()), default_rule)
common_policy.set_rules(rules)
- def tearDown(self):
- super(DefaultPolicyTestCase, self).tearDown()
- policy.reset()
-
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 8f6f27bb0..68795e22f 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -18,20 +18,20 @@
Test suite for PowerVMDriver.
"""
-from nova.compute import power_state
from nova import context
from nova import db
-from nova import flags
from nova import test
+from nova.compute import power_state
from nova.openstack.common import log as logging
from nova.virt import images
+from nova.virt.powervm import blockdev as powervm_blockdev
+from nova.virt.powervm import common
from nova.virt.powervm import driver as powervm_driver
+from nova.virt.powervm import exception
from nova.virt.powervm import lpar
from nova.virt.powervm import operator
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@@ -74,20 +74,6 @@ class FakeIVMOperator(object):
def remove_disk(self, disk_name):
pass
- def create_logical_volume(self, size):
- return 'lvfake01'
-
- def remove_logical_volume(self, lv_name):
- pass
-
- def copy_file_to_device(self, sourcePath, device):
- pass
-
- def copy_image_file(self, sourcePath, remotePath):
- finalPath = '/home/images/rhel62.raw.7e358754160433febd6f3318b7c9e335'
- size = 4294967296
- return finalPath, size
-
def run_cfg_dev(self, device_name):
pass
@@ -109,6 +95,26 @@ class FakeIVMOperator(object):
return 'fake-powervm'
+class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
+
+ def __init__(self):
+ pass
+
+ def _create_logical_volume(self, size):
+ return 'lvfake01'
+
+ def _remove_logical_volume(self, lv_name):
+ pass
+
+ def _copy_file_to_device(self, sourcePath, device, decrompress=True):
+ pass
+
+ def _copy_image_file(self, sourcePath, remotePath, decompress=False):
+ finalPath = '/home/images/rhel62.raw.7e358754160433febd6f3318b7c9e335'
+ size = 4294967296
+ return finalPath, size
+
+
def fake_get_powervm_operator():
return FakeIVMOperator()
@@ -120,6 +126,8 @@ class PowerVMDriverTestCase(test.TestCase):
super(PowerVMDriverTestCase, self).setUp()
self.stubs.Set(operator, 'get_powervm_operator',
fake_get_powervm_operator)
+ self.stubs.Set(operator, 'get_powervm_disk_adapter',
+ lambda: FakeBlockAdapter())
self.powervm_connection = powervm_driver.PowerVMDriver(None)
self.instance = self._create_instance()
@@ -153,6 +161,28 @@ class PowerVMDriverTestCase(test.TestCase):
state = self.powervm_connection.get_info(self.instance)['state']
self.assertEqual(state, power_state.RUNNING)
+ def test_spawn_cleanup_on_fail(self):
+ # Verify on a failed spawn, we get the original exception raised.
+ # helper function
+ def raise_(ex):
+ raise ex
+
+ self.flags(powervm_img_local_path='/images/')
+ self.stubs.Set(images, 'fetch_to_raw', lambda *x, **y: None)
+ self.stubs.Set(
+ self.powervm_connection._powervm._disk_adapter,
+ 'create_volume_from_image',
+ lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
+ self.stubs.Set(
+ self.powervm_connection._powervm, '_cleanup',
+ lambda *x, **y: raise_(Exception('This should be logged.')))
+
+ self.assertRaises(exception.PowerVMImageCreationFailed,
+ self.powervm_connection.spawn,
+ context.get_admin_context(),
+ self.instance,
+ {'id': 'ANY_ID'}, 's3cr3t', [])
+
def test_destroy(self):
self.powervm_connection.destroy(self.instance, None)
self.stubs.Set(FakeIVMOperator, 'get_lpar', lambda x, y: None)
@@ -166,3 +196,31 @@ class PowerVMDriverTestCase(test.TestCase):
self.assertEqual(info['mem'], 1024)
self.assertEqual(info['num_cpu'], 2)
self.assertEqual(info['cpu_time'], 939395)
+
+ def test_remote_utility_1(self):
+ path_one = '/some/file/'
+ path_two = '/path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_2(self):
+ path_one = '/some/file/'
+ path_two = 'path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_3(self):
+ path_one = '/some/file'
+ path_two = '/path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
+
+ def test_remote_utility_4(self):
+ path_one = '/some/file'
+ path_two = 'path/filename'
+ joined_path = common.aix_path_join(path_one, path_two)
+ expected_path = '/some/file/path/filename'
+ self.assertEqual(joined_path, expected_path)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 5cc5dedde..08b33e201 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -25,17 +25,17 @@ from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
-from nova import volume
-
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('scheduler_topic', 'nova.scheduler.rpcapi')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
class QuotaIntegrationTestCase(test.TestCase):
@@ -45,8 +45,6 @@ class QuotaIntegrationTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
- quota_volumes=2,
- quota_gigabytes=20,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
@@ -61,8 +59,8 @@ class QuotaIntegrationTestCase(test.TestCase):
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
- if (topic == FLAGS.scheduler_topic and
+ """Stub out the scheduler creating the instance entry."""
+ if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
@@ -81,7 +79,7 @@ class QuotaIntegrationTestCase(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
@@ -91,17 +89,9 @@ class QuotaIntegrationTestCase(test.TestCase):
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
- def _create_volume(self, size=10):
- """Create a test volume"""
- vol = {}
- vol['user_id'] = self.user_id
- vol['project_id'] = self.project_id
- vol['size'] = size
- return db.volume_create(self.context, vol)['id']
-
def test_too_many_instances(self):
instance_uuids = []
- for i in range(FLAGS.quota_instances):
+ for i in range(CONF.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = instance_types.get_instance_type_by_name('m1.small')
@@ -153,7 +143,7 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_too_many_metadata_items(self):
metadata = {}
- for i in range(FLAGS.quota_metadata_items + 1):
+ for i in range(CONF.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
@@ -183,45 +173,45 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_max_injected_files(self):
files = []
- for i in xrange(FLAGS.quota_injected_files):
+ for i in xrange(CONF.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
- for i in xrange(FLAGS.quota_injected_files + 1):
+ for i in xrange(CONF.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
- max = FLAGS.quota_injected_file_content_bytes
+ max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
- max = FLAGS.quota_injected_file_content_bytes
+ max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
- max = FLAGS.quota_injected_file_path_bytes
+ max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
- max = FLAGS.quota_injected_file_path_bytes
+ max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_reservation_expire(self):
- timeutils.set_time_override()
+ self.useFixture(test.TimeOverride())
def assertInstancesReserved(reserved):
result = quota.QUOTAS.get_project_quotas(self.context,
@@ -291,18 +281,21 @@ class FakeDriver(object):
project_id, quota_class, defaults, usages))
return resources
- def limit_check(self, context, resources, values):
- self.called.append(('limit_check', context, resources, values))
+ def limit_check(self, context, resources, values, project_id=None):
+ self.called.append(('limit_check', context, resources,
+ values, project_id))
- def reserve(self, context, resources, deltas, expire=None):
- self.called.append(('reserve', context, resources, deltas, expire))
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
+ self.called.append(('reserve', context, resources, deltas,
+ expire, project_id))
return self.reservations
- def commit(self, context, reservations):
- self.called.append(('commit', context, reservations))
+ def commit(self, context, reservations, project_id=None):
+ self.called.append(('commit', context, reservations, project_id))
- def rollback(self, context, reservations):
- self.called.append(('rollback', context, reservations))
+ def rollback(self, context, reservations, project_id=None):
+ self.called.append(('rollback', context, reservations, project_id))
def usage_reset(self, context, resources):
self.called.append(('usage_reset', context, resources))
@@ -610,7 +603,7 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- )),
+ ), None),
])
def test_reserve(self):
@@ -625,6 +618,9 @@ class QuotaEngineTestCase(test.TestCase):
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
+ result3 = quota_obj.reserve(context, project_id='fake_project',
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
@@ -632,13 +628,19 @@ class QuotaEngineTestCase(test.TestCase):
test_resource2=3,
test_resource3=2,
test_resource4=1,
- ), None),
+ ), None, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
- ), 3600),
+ ), 3600, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), None, 'fake_project'),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
@@ -646,6 +648,9 @@ class QuotaEngineTestCase(test.TestCase):
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
+ self.assertEqual(result3, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
def test_commit(self):
context = FakeContext(None, None)
@@ -654,7 +659,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('commit', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_rollback(self):
@@ -664,7 +669,7 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
- ('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
+ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None),
])
def test_usage_reset(self):
@@ -728,11 +733,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.calls = []
- timeutils.set_time_override()
-
- def tearDown(self):
- timeutils.clear_time_override()
- super(DbQuotaDriverTestCase, self).tearDown()
+ self.useFixture(test.TimeOverride())
def test_get_defaults(self):
# Use our pre-defined resources
@@ -742,8 +743,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=10,
cores=20,
ram=50 * 1024,
- volumes=10,
- gigabytes=1000,
floating_ips=10,
metadata_items=128,
injected_files=5,
@@ -762,7 +761,6 @@ class DbQuotaDriverTestCase(test.TestCase):
return dict(
instances=5,
ram=25 * 1024,
- gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
@@ -778,8 +776,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=5,
cores=20,
ram=25 * 1024,
- volumes=10,
- gigabytes=500,
floating_ips=10,
metadata_items=64,
injected_files=5,
@@ -799,7 +795,6 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
- gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
@@ -810,7 +805,6 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
- gigabytes=50,
injected_files=2,
injected_file_path_bytes=127,
)
@@ -822,8 +816,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
- volumes=dict(in_use=2, reserved=0),
- gigabytes=dict(in_use=10, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
@@ -863,17 +855,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
+ floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
@@ -941,17 +923,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
+ floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
@@ -1020,16 +992,6 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
floating_ips=dict(
limit=10,
in_use=2,
@@ -1089,12 +1051,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=4,
reserved=4,
),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- injected_files=dict(
+ injected_files=dict(
limit=2,
in_use=0,
reserved=0,
@@ -1126,12 +1083,6 @@ class DbQuotaDriverTestCase(test.TestCase):
ram=dict(
limit=25 * 1024,
),
- volumes=dict(
- limit=10,
- ),
- gigabytes=dict(
- limit=50,
- ),
floating_ips=dict(
limit=10,
),
@@ -1207,7 +1158,6 @@ class DbQuotaDriverTestCase(test.TestCase):
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
- 'volumes', 'gigabytes',
'floating_ips', 'security_groups'],
True)
@@ -1216,8 +1166,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=10,
cores=20,
ram=50 * 1024,
- volumes=10,
- gigabytes=1000,
floating_ips=10,
security_groups=10,
))
@@ -1272,7 +1220,7 @@ class DbQuotaDriverTestCase(test.TestCase):
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
- until_refresh, max_age):
+ until_refresh, max_age, project_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
@@ -1382,7 +1330,7 @@ class DbQuotaDriverTestCase(test.TestCase):
calls.append(('quota_usage_update', context, project_id,
resource, kwargs))
if resource == 'nonexist':
- raise exception.QuotaUsageNotFound()
+ raise exception.QuotaUsageNotFound(project_id=project_id)
self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
ctx = FakeContext('test_project', 'test_class')
@@ -1456,7 +1404,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_get_session():
return FakeSession()
- def fake_get_quota_usages(context, session):
+ def fake_get_quota_usages(context, session, project_id):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
@@ -1482,10 +1430,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
- self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
+ self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
- timeutils.set_time_override()
+ self.useFixture(test.TimeOverride())
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
@@ -1959,3 +1907,65 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
project_id='test_project',
delta=-2 * 1024),
])
+
+
+class NoopQuotaDriverTestCase(test.TestCase):
+ def setUp(self):
+ super(NoopQuotaDriverTestCase, self).setUp()
+
+ self.flags(quota_instances=10,
+ quota_cores=20,
+ quota_ram=50 * 1024,
+ quota_floating_ips=10,
+ quota_metadata_items=128,
+ quota_injected_files=5,
+ quota_injected_file_content_bytes=10 * 1024,
+ quota_injected_file_path_bytes=255,
+ quota_security_groups=10,
+ quota_security_group_rules=20,
+ reservation_expire=86400,
+ until_refresh=0,
+ max_age=0,
+ )
+
+ self.expected_quotas = dict([(r, -1)
+ for r in quota.QUOTAS._resources])
+ self.driver = quota.NoopQuotaDriver()
+
+ def test_get_defaults(self):
+ # Use our pre-defined resources
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+ self.assertEqual(self.expected_quotas, result)
+
+ def test_get_class_quotas(self):
+ result = self.driver.get_class_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_class')
+ self.assertEqual(self.expected_quotas, result)
+
+ def test_get_class_quotas_no_defaults(self):
+ result = self.driver.get_class_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_class',
+ False)
+ self.assertEqual(self.expected_quotas, result)
+
+ def test_get_project_quotas(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project')
+ self.assertEqual(self.expected_quotas, result)
+
+ def test_get_project_quotas_no_defaults(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ defaults=False)
+ self.assertEqual(self.expected_quotas, result)
+
+ def test_get_project_quotas_no_usages(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ usages=False)
+ self.assertEqual(self.expected_quotas, result)
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 3f31e8e01..71beed51e 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -23,11 +23,9 @@ Unit Tests for remote procedure calls using queue
import mox
import sys
-
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova import service
@@ -47,11 +45,12 @@ test_service_opts = [
help="Port number to bind test service to"),
]
-flags.FLAGS.register_opts(test_service_opts)
+CONF = cfg.CONF
+CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
- """Fake manager for tests"""
+ """Fake manager for tests."""
def test_method(self):
return 'manager'
@@ -62,7 +61,7 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test',
@@ -106,14 +105,16 @@ class ServiceFlagsTestCase(test.TestCase):
class ServiceTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.host = 'foo'
self.binary = 'nova-fake'
self.topic = 'fake'
- self.mox.StubOutWithMock(service, 'db')
+ self.mox.StubOutWithMock(db, 'service_create')
+ self.mox.StubOutWithMock(db, 'service_get_by_args')
+ self.flags(use_local=True, group='conductor')
def test_create(self):
@@ -128,18 +129,16 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': self.host,
'binary': self.binary,
'topic': self.topic,
- 'report_count': 0,
- 'availability_zone': 'nova'}
+ 'report_count': 0}
service_ref = {'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
- 'availability_zone': 'nova',
'id': 1}
- service.db.service_get_by_args(mox.IgnoreArg(),
+ db.service_get_by_args(mox.IgnoreArg(),
self.host, self.binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
+ db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
return service_ref
@@ -159,7 +158,7 @@ class ServiceTestCase(test.TestCase):
self._service_start_mocks()
# pre_start_hook is called after service record is created,
# but before RPC consumer is created
- self.manager_mock.pre_start_hook()
+ self.manager_mock.pre_start_hook(rpc_connection=mox.IgnoreArg())
self.manager_mock.create_rpc_dispatcher()
# post_start_hook is called after RPC consumer is created.
self.manager_mock.post_start_hook()
@@ -172,40 +171,6 @@ class ServiceTestCase(test.TestCase):
'nova.tests.test_service.FakeManager')
serv.start()
- def test_report_state_newly_disconnected(self):
- self._service_start_mocks()
-
- service.db.service_get(mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(Exception())
-
- self.mox.ReplayAll()
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
- serv.report_state()
- self.assert_(serv.model_disconnected)
-
- def test_report_state_newly_connected(self):
- service_ref = self._service_start_mocks()
-
- service.db.service_get(mox.IgnoreArg(),
- service_ref['id']).AndReturn(service_ref)
- service.db.service_update(mox.IgnoreArg(), service_ref['id'],
- mox.ContainsKeyValue('report_count', 1))
-
- self.mox.ReplayAll()
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
- serv.model_disconnected = True
- serv.report_state()
-
- self.assert_(not serv.model_disconnected)
-
class TestWSGIService(test.TestCase):
@@ -219,6 +184,14 @@ class TestWSGIService(test.TestCase):
self.assertNotEqual(0, test_service.port)
test_service.stop()
+ def test_service_random_port_with_ipv6(self):
+ CONF.set_default("test_service_listen", "::1")
+ test_service = service.WSGIService("test_service")
+ test_service.start()
+ self.assertEqual("::1", test_service.host)
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
class TestLauncher(test.TestCase):
diff --git a/nova/tests/test_sqlalchemy.py b/nova/tests/test_sqlalchemy.py
new file mode 100644
index 000000000..f79d607f8
--- /dev/null
+++ b/nova/tests/test_sqlalchemy.py
@@ -0,0 +1,66 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for SQLAlchemy specific code."""
+
+from eventlet import db_pool
+try:
+ import MySQLdb
+except ImportError:
+ MySQLdb = None
+
+from nova import context
+from nova.db.sqlalchemy import session
+from nova import test
+
+
+class DbPoolTestCase(test.TestCase):
+ def setUp(self):
+ super(DbPoolTestCase, self).setUp()
+ self.flags(sql_dbpool_enable=True)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ if not MySQLdb:
+ self.skipTest("Unable to test due to lack of MySQLdb")
+
+ def test_db_pool_option(self):
+ self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
+ sql_max_pool_size=42)
+
+ info = {}
+
+ class FakeConnectionPool(db_pool.ConnectionPool):
+ def __init__(self, mod_name, **kwargs):
+ info['module'] = mod_name
+ info['kwargs'] = kwargs
+ super(FakeConnectionPool, self).__init__(mod_name,
+ **kwargs)
+
+ def connect(self, *args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(db_pool, 'ConnectionPool',
+ FakeConnectionPool)
+
+ sql_connection = 'mysql://user:pass@127.0.0.1/nova'
+ self.assertRaises(test.TestingException, session.create_engine,
+ sql_connection)
+
+ self.assertEqual(info['module'], MySQLdb)
+ self.assertEqual(info['kwargs']['max_idle'], 11)
+ self.assertEqual(info['kwargs']['min_size'], 21)
+ self.assertEqual(info['kwargs']['max_size'], 42)
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index f89a5bb94..9e2d3560c 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -18,10 +18,15 @@
"""Tests for the testing base code."""
+from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
+CONF = cfg.CONF
+CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
+
+
class IsolationTestCase(test.TestCase):
"""Ensure that things are cleaned up after failed tests.
@@ -30,7 +35,8 @@ class IsolationTestCase(test.TestCase):
"""
def test_service_isolation(self):
- self.start_service('compute')
+ self.flags(use_local=True, group='conductor')
+ self.useFixture(test.ServiceFixture('compute'))
def test_rpc_consumer_isolation(self):
class NeverCalled(object):
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
index 237339758..722377aa5 100644
--- a/nova/tests/test_test_utils.py
+++ b/nova/tests/test_test_utils.py
@@ -21,7 +21,7 @@ from nova.tests import utils as test_utils
class TestUtilsTestCase(test.TestCase):
def test_get_test_admin_context(self):
- """get_test_admin_context's return value behaves like admin context"""
+ # get_test_admin_context's return value behaves like admin context.
ctxt = test_utils.get_test_admin_context()
# TODO(soren): This should verify the full interface context
@@ -29,13 +29,13 @@ class TestUtilsTestCase(test.TestCase):
self.assertTrue(ctxt.is_admin)
def test_get_test_instance(self):
- """get_test_instance's return value looks like an instance_ref"""
+ # get_test_instance's return value looks like an instance_ref.
instance_ref = test_utils.get_test_instance()
ctxt = test_utils.get_test_admin_context()
db.instance_get(ctxt, instance_ref['id'])
def _test_get_test_network_info(self):
- """Does the return value match a real network_info structure"""
+ """Does the return value match a real network_info structure."""
# The challenge here is to define what exactly such a structure
# must look like.
pass
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 8b883f755..2c46b27bd 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -22,22 +22,15 @@ import os.path
import StringIO
import tempfile
-import eventlet
-from eventlet import greenpool
import mox
import nova
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
-from nova.openstack.common import uuidutils
from nova import test
from nova import utils
-FLAGS = flags.FLAGS
-
-
class ByteConversionTest(test.TestCase):
def test_string_conversions(self):
working_examples = {
@@ -74,6 +67,7 @@ class ByteConversionTest(test.TestCase):
class ExecuteTestCase(test.TestCase):
+
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
@@ -380,11 +374,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertFalse(utils.bool_from_str(None))
self.assertFalse(utils.bool_from_str('junk'))
- def test_generate_glance_url(self):
- generated_url = utils.generate_glance_url()
- actual_url = "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
- self.assertEqual(generated_url, actual_url)
-
def test_read_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
@@ -459,40 +448,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
- def test_service_is_up(self):
- fts_func = datetime.datetime.fromtimestamp
- fake_now = 1000
- down_time = 5
-
- self.flags(service_down_time=down_time)
- self.mox.StubOutWithMock(timeutils, 'utcnow')
-
- # Up (equal)
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - down_time),
- 'created_at': fts_func(fake_now - down_time)}
- self.mox.ReplayAll()
- result = utils.service_is_up(service)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Up
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - down_time + 1),
- 'created_at': fts_func(fake_now - down_time + 1)}
- self.mox.ReplayAll()
- result = utils.service_is_up(service)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Down
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - down_time - 1),
- 'created_at': fts_func(fake_now - down_time - 1)}
- self.mox.ReplayAll()
- result = utils.service_is_up(service)
- self.assertFalse(result)
-
def test_xhtml_escape(self):
self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
@@ -509,31 +464,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEquals(h1, h2)
-class IsUUIDLikeTestCase(test.TestCase):
- def assertUUIDLike(self, val, expected):
- result = uuidutils.is_uuid_like(val)
- self.assertEqual(result, expected)
-
- def test_good_uuid(self):
- val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- self.assertUUIDLike(val, True)
-
- def test_integer_passed(self):
- val = 1
- self.assertUUIDLike(val, False)
-
- def test_non_uuid_string_passed(self):
- val = 'foo-fooo'
- self.assertUUIDLike(val, False)
-
- def test_non_uuid_string_passed2(self):
- val = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
- self.assertUUIDLike(val, False)
-
- def test_gen_valid_uuid(self):
- self.assertUUIDLike(str(utils.gen_uuid()), True)
-
-
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
@@ -744,7 +674,7 @@ class AuditPeriodTest(test.TestCase):
class DiffDict(test.TestCase):
- """Unit tests for diff_dict()"""
+ """Unit tests for diff_dict()."""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
@@ -780,8 +710,50 @@ class MkfsTestCase(test.TestCase):
def test_mkfs(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
+ utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev')
utils.execute('mkswap', '/my/swap/block/dev')
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev')
+ utils.mkfs('msdos', '/my/msdos/block/dev')
utils.mkfs('swap', '/my/swap/block/dev')
+
+ def test_mkfs_with_label(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F',
+ '-L', 'ext4-vol', '/my/block/dev')
+ utils.execute('mkfs', '-t', 'msdos',
+ '-n', 'msdos-vol', '/my/msdos/block/dev')
+ utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev')
+ self.mox.ReplayAll()
+
+ utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
+ utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
+ utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
+
+
+class LastBytesTestCase(test.TestCase):
+ """Test the last_bytes() utility method."""
+
+ def setUp(self):
+ super(LastBytesTestCase, self).setUp()
+ self.f = StringIO.StringIO('1234567890')
+
+ def test_truncated(self):
+ self.f.seek(0, os.SEEK_SET)
+ out, remaining = utils.last_bytes(self.f, 5)
+ self.assertEqual(out, '67890')
+ self.assertTrue(remaining > 0)
+
+ def test_read_all(self):
+ self.f.seek(0, os.SEEK_SET)
+ out, remaining = utils.last_bytes(self.f, 1000)
+ self.assertEqual(out, '1234567890')
+ self.assertFalse(remaining > 0)
+
+ def test_seek_too_far_real_file(self):
+ # StringIO doesn't raise IOError if you see past the start of the file.
+ flo = tempfile.TemporaryFile()
+ content = '1234567890'
+ flo.write(content)
+ self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index cec418aee..5568ff0de 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -14,15 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import __builtin__
+import StringIO
+from nova.openstack.common import cfg
from nova import test
from nova import version
class VersionTestCase(test.TestCase):
- """Test cases for Versions code"""
+ """Test cases for Versions code."""
def setUp(self):
- """setup test with unchanging values"""
+ """setup test with unchanging values."""
super(VersionTestCase, self).setUp()
self.version = version
self.version.FINAL = False
@@ -31,29 +34,52 @@ class VersionTestCase(test.TestCase):
self.version.version_info = {'branch_nick': u'LOCALBRANCH',
'revision_id': 'LOCALREVISION',
'revno': 0}
+ self.version.NOVA_PACKAGE = "g9ec3421"
def test_version_string_is_good(self):
- """Ensure version string works"""
+ # Ensure version string works.
self.assertEqual("2012.10-dev", self.version.version_string())
def test_canonical_version_string_is_good(self):
- """Ensure canonical version works"""
+ # Ensure canonical version works.
self.assertEqual("2012.10", self.version.canonical_version_string())
def test_final_version_strings_are_identical(self):
- """Ensure final version strings match only at release"""
+ # Ensure final version strings match only at release.
self.assertNotEqual(self.version.canonical_version_string(),
self.version.version_string())
self.version.FINAL = True
self.assertEqual(self.version.canonical_version_string(),
self.version.version_string())
- def test_vcs_version_string_is_good(self):
- """Ensure uninstalled code generates local """
- self.assertEqual("LOCALBRANCH:LOCALREVISION",
- self.version.vcs_version_string())
+ def test_version_string_with_package_is_good(self):
+ # Ensure uninstalled code get version string.
+ self.assertEqual("2012.10-g9ec3421",
+ self.version.version_string_with_package())
- def test_version_string_with_vcs_is_good(self):
- """Ensure uninstalled code get version string"""
- self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
- self.version.version_string_with_vcs())
+ def test_release_file(self):
+ version.loaded = False
+ real_open = __builtin__.open
+ real_find_file = cfg.CONF.find_file
+
+ def fake_find_file(self, name):
+ if name == "release":
+ return "/etc/nova/release"
+ return real_find_file(self, name)
+
+ def fake_open(path, *args, **kwargs):
+ if path == "/etc/nova/release":
+ data = """[Nova]
+vendor = ACME Corporation
+product = ACME Nova
+package = 1337"""
+ return StringIO.StringIO(data)
+
+ return real_open(path, *args, **kwargs)
+
+ self.stubs.Set(__builtin__, 'open', fake_open)
+ self.stubs.Set(cfg.ConfigOpts, 'find_file', fake_find_file)
+
+ self.assertEqual(version.vendor_string(), "ACME Corporation")
+ self.assertEqual(version.product_string(), "ACME Nova")
+ self.assertEqual(version.package_string(), "1337")
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 16775054f..452277c54 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -17,18 +17,11 @@
import os
-from nova import exception
-from nova import flags
from nova import test
-from nova import tests
from nova import utils
from nova.virt.disk import api as disk_api
from nova.virt import driver
-from nova.openstack.common import jsonutils
-
-FLAGS = flags.FLAGS
-
class TestVirtDriver(test.TestCase):
def test_block_device(self):
@@ -102,7 +95,7 @@ class TestVirtDisk(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
- def test_lxc_destroy_container(self):
+ def test_lxc_teardown_container(self):
def proc_mounts(self, mount_point):
mount_points = {
@@ -110,7 +103,6 @@ class TestVirtDisk(test.TestCase):
'/mnt/loop/part': '/dev/mapper/loop0p1',
'/mnt/nbd/nopart': '/dev/nbd15',
'/mnt/nbd/part': '/dev/mapper/nbd15p1',
- '/mnt/guestfs': 'guestmount',
}
return mount_points[mount_point]
@@ -118,83 +110,30 @@ class TestVirtDisk(test.TestCase):
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
- disk_api.destroy_container('/mnt/loop/nopart')
+ disk_api.teardown_container('/mnt/loop/nopart')
expected_commands += [
('umount', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
- disk_api.destroy_container('/mnt/loop/part')
+ disk_api.teardown_container('/mnt/loop/part')
expected_commands += [
('umount', '/dev/mapper/loop0p1'),
('kpartx', '-d', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
- disk_api.destroy_container('/mnt/nbd/nopart')
+ disk_api.teardown_container('/mnt/nbd/nopart')
expected_commands += [
('umount', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
- disk_api.destroy_container('/mnt/nbd/part')
+ disk_api.teardown_container('/mnt/nbd/part')
expected_commands += [
('umount', '/dev/mapper/nbd15p1'),
('kpartx', '-d', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
- disk_api.destroy_container('/mnt/guestfs')
- expected_commands += [
- ('fusermount', '-u', '/mnt/guestfs'),
- ]
- # It's not worth trying to match the last timeout command
- self.executes.pop()
-
self.assertEqual(self.executes, expected_commands)
-
-
-class TestVirtDiskPaths(test.TestCase):
- def setUp(self):
- super(TestVirtDiskPaths, self).setUp()
-
- real_execute = utils.execute
-
- def nonroot_execute(*cmd_parts, **kwargs):
- kwargs.pop('run_as_root', None)
- return real_execute(*cmd_parts, **kwargs)
-
- self.stubs.Set(utils, 'execute', nonroot_execute)
-
- def test_check_safe_path(self):
- if tests.utils.is_osx():
- self.skipTest("Unable to test on OSX")
- ret = disk_api._join_and_check_path_within_fs('/foo', 'etc',
- 'something.conf')
- self.assertEquals(ret, '/foo/etc/something.conf')
-
- def test_check_unsafe_path(self):
- if tests.utils.is_osx():
- self.skipTest("Unable to test on OSX")
- self.assertRaises(exception.Invalid,
- disk_api._join_and_check_path_within_fs,
- '/foo', 'etc/../../../something.conf')
-
- def test_inject_files_with_bad_path(self):
- if tests.utils.is_osx():
- self.skipTest("Unable to test on OSX")
- self.assertRaises(exception.Invalid,
- disk_api._inject_file_into_fs,
- '/tmp', '/etc/../../../../etc/passwd',
- 'hax')
-
- def test_inject_metadata(self):
- if tests.utils.is_osx():
- self.skipTest("Unable to test on OSX")
- with utils.tempdir() as tmpdir:
- meta_objs = [{"key": "foo", "value": "bar"}]
- metadata = {"foo": "bar"}
- disk_api._inject_metadata_into_fs(meta_objs, tmpdir)
- json_file = os.path.join(tmpdir, 'meta.js')
- json_data = jsonutils.loads(open(json_file).read())
- self.assertEqual(metadata, json_data)
diff --git a/nova/tests/test_virt_disk.py b/nova/tests/test_virt_disk.py
new file mode 100644
index 000000000..902d49704
--- /dev/null
+++ b/nova/tests/test_virt_disk.py
@@ -0,0 +1,198 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova import test
+from nova.tests import fakeguestfs
+from nova.virt.disk import api as diskapi
+from nova.virt.disk.vfs import guestfs as vfsguestfs
+
+
+class VirtDiskTest(test.TestCase):
+
+ def setUp(self):
+ super(VirtDiskTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsguestfs.guestfs = fakeguestfs
+
+ def test_inject_data_key(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertTrue("/root/.ssh" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700})
+ self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0700})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertTrue("/etc/rc.d/rc.local" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "Hello World#!/bin/sh\n# Added by " +
+ "Nova to ensure injected ssh keys " +
+ "have the right context\nrestorecon " +
+ "-RF root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0700})
+
+ self.assertTrue("/root/.ssh" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700})
+ self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0700})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux_append_with_newline(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertTrue("/etc/rc.d/rc.local" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "#!/bin/sh\necho done\n# Added "
+ "by Nova to ensure injected ssh keys have "
+ "the right context\nrestorecon -RF "
+ "root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0700})
+ vfs.teardown()
+
+ def test_inject_net(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_net_into_fs("mynetconfig", vfs)
+
+ self.assertTrue("/etc/network/interfaces" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/etc/network/interfaces"],
+ {'content': 'mynetconfig',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_metadata(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_metadata_into_fs([{"key": "foo",
+ "value": "bar"},
+ {"key": "eek",
+ "value": "wizz"}], vfs)
+
+ self.assertTrue("/meta.js" in vfs.handle.files)
+ self.assertEquals(vfs.handle.files["/meta.js"],
+ {'content': '{"foo": "bar", ' +
+ '"eek": "wizz"}',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_admin_password(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ def fake_salt():
+ return "1234567890abcdef"
+
+ self.stubs.Set(diskapi, '_generate_salt', fake_salt)
+
+ vfs.handle.write("/etc/shadow",
+ "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n")
+
+ vfs.handle.write("/etc/passwd",
+ "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
+
+ diskapi._inject_admin_password_into_fs("123456", vfs)
+
+ self.assertEquals(vfs.handle.files["/etc/passwd"],
+ {'content': "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:" +
+ "/sbin/nologin\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0700,
+ 'uid': 100})
+ shadow = vfs.handle.files["/etc/shadow"]
+
+ # if the encrypted password is only 13 characters long, then
+ # nova.virt.disk.api:_set_password fell back to DES.
+ if len(shadow['content']) == 91:
+ self.assertEquals(shadow,
+ {'content': "root:12tir.zIbWQ3c" +
+ ":14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0700,
+ 'uid': 100})
+ else:
+ self.assertEquals(shadow,
+ {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
+ "vbFS88TEN0:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0700,
+ 'uid': 100})
+ vfs.teardown()
diff --git a/nova/tests/test_virt_disk_vfs_guestfs.py b/nova/tests/test_virt_disk_vfs_guestfs.py
new file mode 100644
index 000000000..1ff2581e7
--- /dev/null
+++ b/nova/tests/test_virt_disk_vfs_guestfs.py
@@ -0,0 +1,176 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova import test
+
+from nova.tests import fakeguestfs
+from nova.virt.disk.vfs import guestfs as vfsimpl
+
+
+class VirtDiskVFSGuestFSTest(test.TestCase):
+
+ def setUp(self):
+ super(VirtDiskVFSGuestFSTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsimpl.guestfs = fakeguestfs
+
+ def test_appliance_setup_inspect(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ vfs.setup()
+
+ self.assertEqual(vfs.handle.running, True)
+ self.assertEqual(len(vfs.handle.mounts), 2)
+ self.assertEqual(vfs.handle.mounts[0][1],
+ "/dev/mapper/guestvgf-lv_root")
+ self.assertEqual(vfs.handle.mounts[1][1], "/dev/vda1")
+ self.assertEqual(vfs.handle.mounts[0][2], "/")
+ self.assertEqual(vfs.handle.mounts[1][2], "/boot")
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertEqual(vfs.handle, None)
+ self.assertEqual(handle.running, False)
+ self.assertEqual(handle.closed, True)
+ self.assertEqual(len(handle.mounts), 0)
+
+ def test_appliance_setup_static_nopart(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=None)
+ vfs.setup()
+
+ self.assertEqual(vfs.handle.running, True)
+ self.assertEqual(len(vfs.handle.mounts), 1)
+ self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda")
+ self.assertEqual(vfs.handle.mounts[0][2], "/")
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertEqual(vfs.handle, None)
+ self.assertEqual(handle.running, False)
+ self.assertEqual(handle.closed, True)
+ self.assertEqual(len(handle.mounts), 0)
+
+ def test_appliance_setup_static_part(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=2)
+ vfs.setup()
+
+ self.assertEqual(vfs.handle.running, True)
+ self.assertEqual(len(vfs.handle.mounts), 1)
+ self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda2")
+ self.assertEqual(vfs.handle.mounts[0][2], "/")
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertEqual(vfs.handle, None)
+ self.assertEqual(handle.running, False)
+ self.assertEqual(handle.closed, True)
+ self.assertEqual(len(handle.mounts), 0)
+
+ def test_makepath(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertTrue("/some/dir" in vfs.handle.files)
+ self.assertTrue("/other/dir" in vfs.handle.files)
+ self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
+ self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
+
+ vfs.teardown()
+
+ def test_append_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertTrue("/some/file" in vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/some/file"]["content"],
+ "Hello World Goodbye")
+
+ vfs.teardown()
+
+ def test_replace_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertTrue("/some/file" in vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/some/file"]["content"],
+ "Goodbye")
+
+ vfs.teardown()
+
+ def test_read_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertEqual(vfs.read_file("/some/file"), "Hello World")
+
+ vfs.teardown()
+
+ def test_has_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ vfs.teardown()
+
+ def test_set_permissions(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0700)
+
+ vfs.set_permissions("/some/file", 0777)
+ self.assertEquals(vfs.handle.files["/some/file"]["mode"], 0777)
+
+ vfs.teardown()
+
+ def test_set_ownership(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEquals(vfs.handle.files["/some/file"]["uid"], 100)
+ self.assertEquals(vfs.handle.files["/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEquals(vfs.handle.files["/some/file"]["uid"], 105)
+ self.assertEquals(vfs.handle.files["/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEquals(vfs.handle.files["/some/file"]["uid"], 105)
+ self.assertEquals(vfs.handle.files["/some/file"]["gid"], 500)
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEquals(vfs.handle.files["/some/file"]["uid"], 110)
+ self.assertEquals(vfs.handle.files["/some/file"]["gid"], 600)
+
+ vfs.teardown()
diff --git a/nova/tests/test_virt_disk_vfs_localfs.py b/nova/tests/test_virt_disk_vfs_localfs.py
new file mode 100644
index 000000000..806ed01d8
--- /dev/null
+++ b/nova/tests/test_virt_disk_vfs_localfs.py
@@ -0,0 +1,353 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova import test
+from nova.tests import utils as tests_utils
+from nova import utils
+
+from nova.virt.disk.vfs import localfs as vfsimpl
+
+dirs = []
+files = {}
+commands = []
+
+
+def fake_execute(*args, **kwargs):
+ commands.append({"args": args, "kwargs": kwargs})
+
+ if args[0] == "readlink":
+ if args[1] == "-nm":
+ if args[2] in ["/scratch/dir/some/file",
+ "/scratch/dir/some/dir",
+ "/scratch/dir/other/dir",
+ "/scratch/dir/other/file"]:
+ return args[2], ""
+ elif args[1] == "-e":
+ if args[2] in files:
+ return args[2], ""
+
+ return "", "No such file"
+ elif args[0] == "mkdir":
+ dirs.append(args[2])
+ elif args[0] == "chown":
+ owner = args[1]
+ path = args[2]
+ if not path in files:
+ raise Exception("No such file: " + path)
+
+ sep = owner.find(':')
+ if sep != -1:
+ user = owner[0:sep]
+ group = owner[sep + 1:]
+ else:
+ user = owner
+ group = None
+
+ if user:
+ if user == "fred":
+ uid = 105
+ else:
+ uid = 110
+ files[path]["uid"] = uid
+ if group:
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chgrp":
+ group = args[1]
+ path = args[2]
+ if not path in files:
+ raise Exception("No such file: " + path)
+
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chmod":
+ mode = args[1]
+ path = args[2]
+ if not path in files:
+ raise Exception("No such file: " + path)
+
+ files[path]["mode"] = int(mode, 8)
+ elif args[0] == "cat":
+ path = args[1]
+ if not path in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700
+ }
+ return files[path]["content"], ""
+ elif args[0] == "tee":
+ if args[1] == "-a":
+ path = args[2]
+ append = True
+ else:
+ path = args[1]
+ append = False
+ print str(files)
+ if not path in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0700,
+ }
+ if append:
+ files[path]["content"] += kwargs["process_input"]
+ else:
+ files[path]["content"] = kwargs["process_input"]
+
+
+class VirtDiskVFSLocalFSTestPaths(test.TestCase):
+ def setUp(self):
+ super(VirtDiskVFSLocalFSTestPaths, self).setUp()
+
+ real_execute = utils.execute
+
+ def nonroot_execute(*cmd_parts, **kwargs):
+ kwargs.pop('run_as_root', None)
+ return real_execute(*cmd_parts, **kwargs)
+
+ self.stubs.Set(utils, 'execute', nonroot_execute)
+
+ def test_check_safe_path(self):
+ if tests_utils.is_osx():
+ self.skipTest("Unable to test on OSX")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ ret = vfs._canonical_path('etc/something.conf')
+ self.assertEquals(ret, '/foo/etc/something.conf')
+
+ def test_check_unsafe_path(self):
+ if tests_utils.is_osx():
+ self.skipTest("Unable to test on OSX")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ self.assertRaises(exception.Invalid,
+ vfs._canonical_path,
+ 'etc/../../../something.conf')
+
+
+class VirtDiskVFSLocalFSTest(test.TestCase):
+
+ def setUp(self):
+ super(VirtDiskVFSLocalFSTest, self).setUp()
+
+ def test_makepath(self):
+ global dirs, commands
+ dirs = []
+ commands = []
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertEqual(dirs,
+ ["/scratch/dir/some/dir", "/scratch/dir/other/dir"]),
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True}}])
+
+ def test_append_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertTrue("/scratch/dir/some/file" in files)
+ self.assertEquals(files["/scratch/dir/some/file"]["content"],
+ "Hello World Goodbye")
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('tee', '-a',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': ' Goodbye',
+ 'run_as_root': True}}])
+
+ def test_replace_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertTrue("/scratch/dir/some/file" in files)
+ self.assertEquals(files["/scratch/dir/some/file"]["content"],
+ "Goodbye")
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('tee', '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': 'Goodbye',
+ 'run_as_root': True}}])
+
+ def test_read_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ self.assertEqual(vfs.read_file("/some/file"), "Hello World")
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}}])
+
+ def test_has_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True}},
+ ])
+
+ def test_set_permissions(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ vfs.set_permissions("/some/file", 0777)
+ self.assertEquals(files["/scratch/dir/some/file"]["mode"], 0777)
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('chmod', '777',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}}])
+
+ def test_set_ownership(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertEquals(files["/scratch/dir/some/file"]["uid"], 100)
+ self.assertEquals(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEquals(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEquals(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEquals(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEquals(files["/scratch/dir/some/file"]["gid"], 500)
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEquals(files["/scratch/dir/some/file"]["uid"], 110)
+ self.assertEquals(files["/scratch/dir/some/file"]["gid"], 600)
+
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('chown', 'fred',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('chgrp', 'users',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}},
+ {'args': ('chown', 'joe:admins',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True}}])
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 9d48cdf06..199ae30b1 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -15,16 +15,17 @@
# under the License.
import base64
+import fixtures
import netaddr
import sys
import traceback
from nova.compute.manager import ComputeManager
-from nova import db
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import fake_libvirt_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.virt import fake
@@ -52,7 +53,7 @@ def catch_notimplementederror(f):
return wrapped_func
-class _FakeDriverBackendTestCase(test.TestCase):
+class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
# So that the _supports_direct_io does the test based
# on the current working directory, instead of the
@@ -73,12 +74,21 @@ class _FakeDriverBackendTestCase(test.TestCase):
import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
- self.saved_libvirt_imagebackend = nova.virt.libvirt.driver.imagebackend
- nova.virt.libvirt.driver.imagebackend = fake_imagebackend
- nova.virt.libvirt.driver.libvirt = fakelibvirt
- nova.virt.libvirt.driver.libvirt_utils = fake_libvirt_utils
- nova.virt.libvirt.snapshots.libvirt_utils = fake_libvirt_utils
- nova.virt.libvirt.firewall.libvirt = fakelibvirt
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.imagebackend',
+ fake_imagebackend))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt',
+ fakelibvirt))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.snapshots.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.firewall.libvirt',
+ fakelibvirt))
self.flags(rescue_image_id="2",
rescue_kernel_id="3",
@@ -111,12 +121,8 @@ class _FakeDriverBackendTestCase(test.TestCase):
# Restore libvirt
import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
- nova.virt.libvirt.driver.imagebackend = self.saved_libvirt_imagebackend
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
- nova.virt.libvirt.driver.libvirt = self.saved_libvirt
- nova.virt.libvirt.driver.libvirt_utils = self.saved_libvirt
- nova.virt.libvirt.firewall.libvirt = self.saved_libvirt
def setUp(self):
super(_FakeDriverBackendTestCase, self).setUp()
@@ -132,7 +138,7 @@ class _FakeDriverBackendTestCase(test.TestCase):
super(_FakeDriverBackendTestCase, self).tearDown()
-class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase):
+class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class"""
@@ -209,13 +215,15 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
- self.ctxt, instance_ref, img_ref['id'])
+ self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
- self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
@catch_notimplementederror
def test_reboot(self):
@@ -273,10 +281,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
- def test_poll_rescued_instances(self):
- self.connection.poll_rescued_instances(10)
-
- @catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
instance_type_ref = test_utils.get_test_instance_type()
@@ -340,13 +344,13 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
- self.connection.resume(instance_ref)
+ self.connection.resume(instance_ref, network_info)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
- self.connection.resume(instance_ref)
+ self.connection.resume(instance_ref, network_info)
@catch_notimplementederror
def test_destroy_instance_nonexistent(self):
@@ -375,10 +379,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def test_attach_detach_volume(self):
instance_ref, network_info = self._get_running_instance()
self.connection.attach_volume({'driver_volume_type': 'fake'},
- instance_ref['name'],
+ instance_ref,
'/mnt/nova/something')
self.connection.detach_volume({'driver_volume_type': 'fake'},
- instance_ref['name'],
+ instance_ref,
'/mnt/nova/something')
@catch_notimplementederror
@@ -386,11 +390,11 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.attach_volume({'driver_volume_type': 'fake'},
- instance_ref['name'],
+ instance_ref,
'/mnt/nova/something')
self.connection.power_on(instance_ref)
self.connection.detach_volume({'driver_volume_type': 'fake'},
- instance_ref['name'],
+ instance_ref,
'/mnt/nova/something')
@catch_notimplementederror
@@ -428,6 +432,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_get_console_output(self):
+ fake_libvirt_utils.files['dummy.log'] = ''
instance_ref, network_info = self._get_running_instance()
console_output = self.connection.get_console_output(instance_ref)
self.assertTrue(isinstance(console_output, basestring))
@@ -525,19 +530,19 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
-class AbstractDriverTestCase(_VirtDriverTestCase):
+class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
-class FakeConnectionTestCase(_VirtDriverTestCase):
+class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
super(FakeConnectionTestCase, self).setUp()
-class LibvirtConnTestCase(_VirtDriverTestCase):
+class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 757ec2bf2..577d227ce 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -16,30 +16,28 @@
# under the License.
"""
-Test suite for VMWareAPI.
+Test suite for VMwareAPI.
"""
from nova.compute import power_state
+from nova.compute import task_states
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import test
import nova.tests.image.fake
+from nova.tests import matchers
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
-FLAGS = flags.FLAGS
-
-
-class VMWareAPIVMTestCase(test.TestCase):
+class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
+ super(VMwareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
@@ -50,7 +48,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMWareESXDriver(None, False)
+ self.conn = driver.VMwareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
@@ -80,7 +78,7 @@ class VMWareAPIVMTestCase(test.TestCase):
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
+ super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
@@ -163,17 +161,29 @@ class VMWareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.context, self.instance, "Test-Snapshot")
+ self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
+ func_call_matcher.call)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
+ self.assertIsNone(func_call_matcher.match())
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
- self.context, self.instance, "Test-Snapshot")
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
def test_reboot(self):
self._create_vm()
@@ -219,21 +229,21 @@ class VMWareAPIVMTestCase(test.TestCase):
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.PAUSED)
- self.conn.resume(self.instance)
+ self.conn.resume(self.instance, self.network_info)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
- self.instance)
+ self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
- self.instance)
+ self.instance, self.network_info)
def test_get_info(self):
self._create_vm()
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
index 849206973..b4b25ed97 100644
--- a/nova/tests/test_wsgi.py
+++ b/nova/tests/test_wsgi.py
@@ -21,8 +21,6 @@
import os.path
import tempfile
-import unittest
-
import nova.exception
from nova import test
import nova.wsgi
@@ -42,7 +40,7 @@ class TestLoaderNothingExists(test.TestCase):
)
-class TestLoaderNormalFilesystem(unittest.TestCase):
+class TestLoaderNormalFilesystem(test.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
@@ -52,6 +50,7 @@ document_root = /tmp
"""
def setUp(self):
+ super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
@@ -74,9 +73,10 @@ document_root = /tmp
def tearDown(self):
self.config.close()
+ super(TestLoaderNormalFilesystem, self).tearDown()
-class TestWSGIServer(unittest.TestCase):
+class TestWSGIServer(test.TestCase):
"""WSGI server tests."""
def test_no_app(self):
@@ -90,3 +90,12 @@ class TestWSGIServer(unittest.TestCase):
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
+
+ def test_start_random_port_with_ipv6(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="::1", port=0)
+ server.start()
+ self.assertEqual("::1", server.host)
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 54f7948b6..0b1c5d0e7 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -32,7 +32,7 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -41,6 +41,7 @@ from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
+from nova.tests import matchers
from nova.tests.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
@@ -55,7 +56,12 @@ from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
@@ -107,6 +113,33 @@ def set_image_fixtures():
image_service.create(None, image_meta)
+def get_fake_device_info():
+ # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
+ # can be removed from the dict when LP bug #1087308 is fixed
+ fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+ fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+ fake = {'block_device_mapping':
+ [{'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'sr_uuid': 'falseSR',
+ 'introduce_sr_keys': ['sr_type'],
+ 'sr_type': 'iscsi',
+ 'vdi_uuid': fake_vdi_uuid,
+ 'target_discovered': False,
+ 'target_iqn': 'foo_iqn:foo_volid',
+ 'target_portal': 'localhost:3260',
+ 'volume_id': 'foo_volid',
+ 'target_lun': 1,
+ 'auth_password': 'my-p@55w0rd',
+ 'auth_username': 'johndoe',
+ 'auth_method': u'CHAP'}, },
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}, ],
+ 'root_device_name': '/dev/sda',
+ 'ephemerals': [],
+ 'swap': None, }
+ return fake
+
+
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
@@ -173,7 +206,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
- vol['availability_zone'] = FLAGS.node_availability_zone
+ vol['availability_zone'] = CONF.default_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@@ -232,14 +265,13 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
'dev/sd')
def test_attach_volume(self):
- """This shows how to test Ops classes' methods."""
+ # This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
- vm = xenapi_fake.create_vm(instance.name, 'Running')
+ vm = xenapi_fake.create_vm(instance['name'], 'Running')
result = conn.attach_volume(self._make_connection_info(),
- instance.name, '/dev/sdc')
+ instance, '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
@@ -249,17 +281,16 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
- """This shows how to test when exceptions are raised."""
+ # This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{'driver_volume_type': 'nonexist'},
- instance.name,
+ instance,
'/dev/sdc')
@@ -267,7 +298,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
- self.network = importutils.import_object(FLAGS.network_manager)
+ self.network = importutils.import_object(CONF.network_manager)
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
@@ -277,7 +308,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ xenapi_fake.create_network('fake', CONF.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
@@ -308,11 +339,12 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
- super(XenAPIVMTestCase, self).tearDown()
fake_image.FakeImageService_reset()
+ super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
@@ -340,7 +372,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
- with open('xenapi/vm_rrd.xml') as f:
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'xenapi/vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
@@ -361,9 +394,10 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
- self.assertDictMatch(fake_diagnostics, expected)
+ self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
def test_instance_snapshot_fails_with_no_primary_vdi(self):
+
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
@@ -384,9 +418,20 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
- self.context, instance, image_id)
+ self.context, instance, image_id,
+ lambda *args, **kwargs: None)
def test_instance_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
@@ -395,7 +440,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance = self._create_instance()
image_id = "my_snapshot_id"
- self.conn.snapshot(self.context, instance, image_id)
+ self.conn.snapshot(self.context, instance, image_id,
+ func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
@@ -404,7 +450,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
- self.assertEquals(vm_labels, [instance.name])
+ self.assertEquals(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
@@ -412,7 +458,10 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
- self.assertEquals(vbd_labels, [instance.name])
+ self.assertEquals(vbd_labels, [instance['name']])
+
+ # Ensure task states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -513,17 +562,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- session = xenapi_conn.XenAPISession(url, username, password)
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password,
+ fake.FakeVirtAPI())
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- session = xenapi_conn.XenAPISession(url, username, password)
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password,
+ fake.FakeVirtAPI())
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
@@ -542,7 +593,10 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
- create_record=True, empty_dns=False):
+ create_record=True, empty_dns=False,
+ image_meta={'id': IMAGE_VHD,
+ 'disk_format': 'vhd'},
+ block_device_info=None):
if injected_files is None:
injected_files = []
@@ -574,17 +628,15 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
self.conn.spawn(self.context, instance, image_meta, injected_files,
- 'herp', network_info)
+ 'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, check_injection)
- self.assertTrue(instance.os_type)
- self.assertTrue(instance.architecture)
+ self.assertTrue(instance['os_type'])
+ self.assertTrue(instance['architecture'])
def test_spawn_empty_dns(self):
- """Test spawning with an empty dns list"""
+ # Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
@@ -673,6 +725,16 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
+ def test_spawn_boot_from_volume_no_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ image_meta={}, block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_with_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ block_device_info=dev_info)
+
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
@@ -774,7 +836,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid='00000000-0000-0000-0000-000000000002',
- host=FLAGS.host,
+ host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id)
@@ -796,7 +858,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
str(3 * 10 * 1024))
def test_spawn_injected_files(self):
- """Test spawning with injected_files"""
+ # Test spawning with injected_files.
actual_injected_files = []
def fake_inject_file(self, method, args):
@@ -816,8 +878,9 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_rescue(self):
instance = self._create_instance()
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
- vm_ref = vm_utils.lookup(session, instance.name)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
+ vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
@@ -845,7 +908,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
- rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
+ rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+ 'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
@@ -863,7 +927,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def __init__(self):
self.finish_revert_migration_called = False
- def finish_revert_migration(self, instance):
+ def finish_revert_migration(self, instance, block_info):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
@@ -882,23 +946,24 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
conn.reboot(instance, None, "SOFT")
def test_reboot_halted(self):
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance.name, 'Halted')
+ xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(instance, None, "SOFT")
- vm_ref = vm_utils.lookup(session, instance.name)
+ vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEquals(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance.name, 'Unknown')
+ xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
- def test_maintenance_mode(self):
+ def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
@@ -912,9 +977,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
- # Always find the 'bar' destination host
+ def fake_aggregate_get(context, host, key):
+ if find_aggregate:
+ return [{'fake': 'aggregate'}]
+ else:
+ return []
+ self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
+ fake_aggregate_get)
+
def fake_host_find(context, session, src, dst):
- return 'bar'
+ if find_host:
+ return 'bar'
+ else:
+ raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
@@ -929,11 +1004,67 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
self.assertTrue(instance['task_state'], task_states.MIGRATING)
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
+ def test_session_virtapi(self):
+ was = {'called': False}
+
+ def fake_aggregate_get_by_host(self, *args, **kwargs):
+ was['called'] = True
+ raise Exception()
+ self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+
+ self.stubs.Set(self.conn._session, "is_slave", True)
+
+ try:
+ self.conn._session._get_host_uuid()
+ except Exception:
+ pass
+ self.assertTrue(was['called'])
+
+ def test_per_instance_usage_running(self):
+ instance = self._create_instance(spawn=True)
+ instance_type = instance_types.get_instance_type(3)
+
+ expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'],
+ 'uuid': instance['uuid']}}
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ # Paused instances still consume resources:
+ self.conn.pause(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
+ 'display_name': 'host-%d' % instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
@@ -1005,7 +1136,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ xenapi_fake.create_network('fake', CONF.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -1081,7 +1212,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
@@ -1089,7 +1220,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
def fake_raise(*args, **kwargs):
@@ -1129,7 +1260,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
@@ -1164,7 +1295,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
@@ -1186,7 +1317,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
@@ -1203,17 +1334,31 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
- image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
+ def test_migrate_no_auto_disk_config_no_resize_down(self):
+ # Resize down should fail when auto_disk_config not set.
+ instance_values = self.instance_values
+ instance_values['root_gb'] = 40
+ instance_values['auto_disk_config'] = False
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ instance_type = db.instance_type_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ResizeError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', instance_type, None)
+
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
- """Can convert from type id to type string."""
+ # Can convert from type id to type string.
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
@@ -1257,26 +1402,60 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
+ def test_none(self):
+ image_meta = None
+ self.assert_disk_type(image_meta, None)
+
+
+class XenAPIDetermineIsPVTestCase(test.TestCase):
+ """Unit tests for code that detects the PV status based on ImageType."""
+ def assert_pv_status(self, disk_image_type, os_type, expected_pv_status):
+ session = None
+ vdi_ref = None
+ actual = vm_utils.determine_is_pv(session, vdi_ref,
+ disk_image_type, os_type)
+ self.assertEqual(expected_pv_status, actual)
+
+ def test_windows_vhd(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False)
+
+ def test_linux_vhd(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_raw(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', True)
+
+ def test_disk(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK, None, True)
+
+ def test_iso(self):
+ self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_none(self):
+ self.assert_pv_status(None, None, True)
+
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
- """Test that cmp_version compares a as less than b"""
+ # Test that cmp_version compares a as less than b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
- """Test that cmp_version compares a as greater than b"""
+ # Test that cmp_version compares a as greater than b.
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
- """Test that cmp_version compares a as equal to b"""
+ # Test that cmp_version compares a as equal to b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
- """Test that cmp_version compares non-lexically"""
+ # Test that cmp_version compares non-lexically.
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
- """Test that cmp_version compares by length as last resort"""
+ # Test that cmp_version compares by length as last resort.
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
@@ -1415,7 +1594,8 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
fake_resize_part_and_fs)
ctx = context.RequestContext(self.user_id, self.project_id)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = db.instance_create(self.context, self.instance_values)
@@ -1439,7 +1619,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
- """Should not partition unless fail safes pass"""
+ # Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
@@ -1465,7 +1645,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
class XenAPIGenerateLocal(stubs.XenAPITestBase):
- """Test generating of local disks, like swap and ephemeral"""
+ """Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(xenapi_connection_url='test_url',
@@ -1501,7 +1681,8 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
def assertCalled(self, instance):
ctx = context.RequestContext(self.user_id, self.project_id)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
@@ -1516,7 +1697,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertTrue(self.called)
def test_generate_swap(self):
- """Test swap disk generation."""
+ # Test swap disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 5})
@@ -1533,7 +1714,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertCalled(instance)
def test_generate_ephemeral(self):
- """Test ephemeral disk generation."""
+ # Test ephemeral disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 4})
@@ -1599,19 +1780,12 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
}
def test_get_all_bw_counters(self):
- class testinstance(object):
- def __init__(self, name, uuid):
- self.name = name
- self.uuid = uuid
+ instances = [dict(name='test1', uuid='1-2-3'),
+ dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth)
- result = self.conn.get_all_bw_counters([testinstance(
- name='test1',
- uuid='1-2-3'),
- testinstance(
- name='test2',
- uuid='4-5-6')])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
@@ -1635,14 +1809,11 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
- class testinstance(object):
- def __init__(self):
- self.name = "instance-0001"
- self.uuid = "1-2-3-4-5"
+ instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
- result = self.conn.get_all_bw_counters([testinstance()])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
@@ -1704,7 +1875,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
- self.network = importutils.import_object(FLAGS.network_manager)
+ self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
@@ -1955,18 +2126,20 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
- """Ensure StorageRepositoryNotFound is raise when wrong filter."""
+ # Ensure StorageRepositoryNotFound is raise when wrong filter.
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
- """Ensure the default local-storage is found."""
+ # Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(
name_label='Fake Storage',
@@ -1979,10 +2152,11 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
- """Ensure the SR is found when using a different filter."""
+ # Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
@@ -1992,10 +2166,11 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
- """Ensure the default SR is found regardless of other-config."""
+ # Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
pool_ref = xenapi_fake.create_pool('')
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
@@ -2011,8 +2186,7 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': avail_zone})
+ 'report_count': 0})
return values
@@ -2027,19 +2201,22 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
- compute_driver='xenapi.XenAPIDriver')
+ compute_driver='xenapi.XenAPIDriver',
+ default_availability_zone='avail_zone1')
+ self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
- 'availability_zone': 'test_zone',
- 'metadata': {pool_states.POOL_FLAG: 'XenAPI'}}
+ 'metadata': {'availability_zone': 'test_zone',
+ pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
+ 'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
@@ -2090,12 +2267,13 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate.id)
+ result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
- self.assertDictMatch(self.fake_metadata, result.metadetails)
+ self.assertThat(self.fake_metadata,
+ matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
- """Ensure join_slave gets called when the request gets to master."""
+ # Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
@@ -2118,16 +2296,17 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
values = {"name": 'fake_aggregate',
- "availability_zone": 'fake_zone'}
+ 'metadata': {'availability_zone': 'fake_zone'}}
result = db.aggregate_create(self.context, values)
- metadata = {pool_states.POOL_FLAG: "XenAPI",
+ metadata = {'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
- db.aggregate_metadata_add(self.context, result.id, metadata)
+ db.aggregate_metadata_add(self.context, result['id'], metadata)
- db.aggregate_host_add(self.context, result.id, "host")
- aggregate = db.aggregate_get(self.context, result.id)
- self.assertEqual(["host"], aggregate.hosts)
- self.assertEqual(metadata, aggregate.metadetails)
+ db.aggregate_host_add(self.context, result['id'], "host")
+ aggregate = db.aggregate_get(self.context, result['id'])
+ self.assertEqual(["host"], aggregate['hosts'])
+ self.assertEqual(metadata, aggregate['metadetails'])
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
@@ -2149,7 +2328,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.context, result, "test_host")
def test_remove_slave(self):
- """Ensure eject slave gets called."""
+ # Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
@@ -2161,20 +2340,22 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
- """Ensure metadata are cleared after removal."""
+ # Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate.id)
+ result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
- self.assertDictMatch({pool_states.POOL_FLAG: 'XenAPI',
- pool_states.KEY: pool_states.ACTIVE}, result.metadetails)
+ self.assertThat({'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: pool_states.ACTIVE},
+ matchers.DictMatches(result['metadetails']))
def test_remote_master_non_empty_pool(self):
- """Ensure AggregateError is raised if removing the master."""
+ # Ensure AggregateError is raised if removing the master.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -2186,18 +2367,18 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
- values = {"name": aggr_name,
- "availability_zone": aggr_zone}
- result = db.aggregate_create(self.context, values)
+ values = {"name": aggr_name}
+ result = db.aggregate_create(self.context, values,
+ metadata={'availability_zone': aggr_zone})
pool_flag = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: aggr_state}
- db.aggregate_metadata_add(self.context, result.id, pool_flag)
+ db.aggregate_metadata_add(self.context, result['id'], pool_flag)
for host in hosts:
- db.aggregate_host_add(self.context, result.id, host)
+ db.aggregate_host_add(self.context, result['id'], host)
if metadata:
- db.aggregate_metadata_add(self.context, result.id, metadata)
- return db.aggregate_get(self.context, result.id)
+ db.aggregate_metadata_add(self.context, result['id'], metadata)
+ return db.aggregate_get(self.context, result['id'])
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when adding host while
@@ -2224,7 +2405,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_remove_host_from_aggregate_error(self):
- """Ensure we can remove a host from an aggregate even if in error."""
+ # Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -2262,30 +2443,25 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
- """Ensure the undo operation works correctly on add."""
+ # Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
- raise exception.AggregateError
+ raise exception.AggregateError(
+ aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(self.context, self.aggr.id, metadata)
- db.aggregate_host_add(self.context, self.aggr.id, 'fake_host')
+ db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+ db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, "fake_host",
aggregate=jsonutils.to_primitive(self.aggr))
- excepted = db.aggregate_get(self.context, self.aggr.id)
- self.assertEqual(excepted.metadetails[pool_states.KEY],
+ excepted = db.aggregate_get(self.context, self.aggr['id'])
+ self.assertEqual(excepted['metadetails'][pool_states.KEY],
pool_states.ERROR)
- self.assertEqual(excepted.hosts, [])
-
-
-class Aggregate(object):
- def __init__(self, id=None, hosts=None):
- self.id = id
- self.hosts = hosts or []
+ self.assertEqual(excepted['hosts'], [])
class MockComputeAPI(object):
@@ -2306,7 +2482,7 @@ class MockComputeAPI(object):
class StubDependencies(object):
- """Stub dependencies for ResourcePool"""
+ """Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
@@ -2325,28 +2501,36 @@ class StubDependencies(object):
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
- """ A ResourcePool, use stub dependencies """
+ """A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.TestCase):
+ fake_aggregate = {
+ 'id': 98,
+ 'hosts': [],
+ 'metadetails': {
+ 'master_compute': 'master',
+ pool_states.POOL_FLAG: {},
+ pool_states.KEY: {}
+ }
+ }
+
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
- aggregate = Aggregate(id=98, hosts=[])
- slave.add_to_aggregate("CONTEXT", aggregate, "slave")
+ slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
- "CONTEXT", jsonutils.to_primitive(aggregate),
+ "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
"slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
- aggregate = Aggregate(id=98, hosts=[])
- slave.remove_from_aggregate("CONTEXT", aggregate, "slave")
+ slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
@@ -2550,7 +2734,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
self.metadetails = {"host": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(FLAGS.host, host)
+ self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
@@ -2567,7 +2751,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
self.metadetails = {"dest_other": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(FLAGS.host, host)
+ self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 328cd8d67..6437f9537 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -18,10 +18,13 @@ import platform
import nova.context
import nova.db
-import nova.flags
from nova.image import glance
+from nova.network import minidns
+from nova.network import model as network_model
+from nova.openstack.common import cfg
-FLAGS = nova.flags.FLAGS
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
@@ -74,11 +77,12 @@ def get_test_instance(context=None):
def get_test_network_info(count=1):
- ipv6 = FLAGS.use_ipv6
+ ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
fake_ip_3 = '0.0.0.1/0'
+ fake_netmask = '255.255.255.255'
fake_vlan = 100
fake_bridge_interface = 'eth0'
network = {'bridge': fake,
@@ -88,12 +92,16 @@ def get_test_network_info(count=1):
'bridge_interface': fake_bridge_interface,
'injected': False}
mapping = {'mac': fake,
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
'dhcp_server': fake,
+ 'dns': ['fake1', 'fake2'],
'gateway': fake,
'gateway_v6': fake,
- 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
+ 'ips': [{'ip': fake_ip, 'netmask': fake_netmask},
+ {'ip': fake_ip, 'netmask': fake_netmask}]}
if ipv6:
- mapping['ip6s'] = [{'ip': fake_ip},
+ mapping['ip6s'] = [{'ip': fake_ip, 'netmask': fake_netmask},
{'ip': fake_ip_2},
{'ip': fake_ip_3}]
return [(network, mapping) for x in xrange(0, count)]
@@ -101,3 +109,20 @@ def get_test_network_info(count=1):
def is_osx():
return platform.mac_ver()[0] != ''
+
+
+test_dns_managers = []
+
+
+def dns_manager():
+ global test_dns_managers
+ manager = minidns.MiniDNS()
+ test_dns_managers.append(manager)
+ return manager
+
+
+def cleanup_dns_managers():
+ global test_dns_managers
+ for manager in test_dns_managers:
+ manager.delete_dns_file()
+ test_dns_managers = []
diff --git a/nova/tests/virt/__init__.py b/nova/tests/virt/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/virt/__init__.py
diff --git a/nova/tests/runtime_flags.py b/nova/tests/virt/disk/__init__.py
index 7cc8c3219..e07f7d348 100644
--- a/nova/tests/runtime_flags.py
+++ b/nova/tests/virt/disk/__init__.py
@@ -1,7 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2012 Michael Still
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,9 +14,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-from nova import flags
-from nova.openstack.common import cfg
-
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag'))
diff --git a/nova/tests/virt/disk/test_api.py b/nova/tests/virt/disk/test_api.py
new file mode 100644
index 000000000..15fb2fc2f
--- /dev/null
+++ b/nova/tests/virt/disk/test_api.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempfile
+
+import fixtures
+
+from nova import test
+from nova.virt.disk import api
+
+
+class APITestCase(test.TestCase):
+
+ def test_can_resize_need_fs_type_specified(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # treat a failure to mount as a failure to be able to resize the
+ # filesystem
+ def _fake_get_disk_size(path):
+ return 10
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))
+
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.get_dev',
+ fake_returns_true))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.map_dev',
+ fake_returns_true))
+
+ # Force the use of localfs, which is what was used during the failure
+ # reported in the bug
+ def fake_import_fails(*args, **kwargs):
+ raise Exception('Failed')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.openstack.common.importutils.import_module',
+ fake_import_fails))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ self.assertFalse(api.can_resize_fs(imgfile, 100, use_cow=True))
diff --git a/nova/tests/virt/disk/test_loop.py b/nova/tests/virt/disk/test_loop.py
new file mode 100644
index 000000000..dedd1377d
--- /dev/null
+++ b/nova/tests/virt/disk/test_loop.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Michael Still
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import loop
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+def _fake_trycmd_losetup_works(*args, **kwargs):
+ return '/dev/loop0', ''
+
+
+def _fake_trycmd_losetup_fails(*args, **kwards):
+ return '', 'doh'
+
+
+class LoopTestCase(test.TestCase):
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_works))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(l.get_dev())
+ self.assertTrue(l.linked)
+ self.assertEquals('', l.error)
+ self.assertEquals('/dev/loop0', l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertEquals('', l.error)
+ self.assertEquals(None, l.device)
+
+ def test_inner_get_dev_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+
+ # No error logged, device consumed
+ self.assertFalse(l._inner_get_dev())
+ self.assertFalse(l.linked)
+ self.assertNotEquals('', l.error)
+ self.assertEquals(None, l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertEquals(None, l.device)
+
+ def test_get_dev_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # Always fail to get a device
+ def fake_get_dev_fails():
+ return False
+ l._inner_get_dev = fake_get_dev_fails
+
+ # Fail to get a device
+ self.assertFalse(l.get_dev())
+
+ def test_unget_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # This just checks that a free of something we don't have doesn't
+ # throw an exception
+ l.unget_dev()
diff --git a/nova/tests/virt/disk/test_nbd.py b/nova/tests/virt/disk/test_nbd.py
new file mode 100644
index 000000000..59b0784d9
--- /dev/null
+++ b/nova/tests/virt/disk/test_nbd.py
@@ -0,0 +1,292 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import nbd
+
+ORIG_EXISTS = os.path.exists
+ORIG_LISTDIR = os.listdir
+
+
+def _fake_exists_no_users(path):
+ if path.startswith('/sys/block/nbd'):
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_listdir_nbd_devices(path):
+ if path.startswith('/sys/block'):
+ return ['nbd0', 'nbd1']
+ return ORIG_LISTDIR(path)
+
+
+def _fake_exists_no_users(path):
+ if path.startswith('/sys/block/nbd'):
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_exists_all_used(path):
+ if path.startswith('/sys/block/nbd'):
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_detect_nbd_devices_none(self):
+ return []
+
+
+def _fake_detect_nbd_devices(self):
+ return ['nbd0', 'nbd1']
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class NbdTestCase(test.TestCase):
+ def setUp(self):
+ super(NbdTestCase, self).setUp()
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices)
+ self.useFixture(fixtures.MonkeyPatch('os.listdir',
+ _fake_listdir_nbd_devices))
+
+ def test_nbd_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
+ n = nbd.NbdMount(None, tempdir)
+ self.assertEquals(None, n._allocate_nbd())
+
+ def test_nbd_no_free_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_all_used))
+ self.assertEquals(None, n._allocate_nbd())
+
+ def test_nbd_not_loaded(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ return False
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # This should fail, as we don't have the module "loaded"
+ # TODO(mikal): work out how to force english as the gettext language
+ # so that the error check always passes
+ self.assertEquals(None, n._allocate_nbd())
+ self.assertEquals('nbd unavailable: module not loaded', n.error)
+
+ def test_nbd_allocation(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Allocate a nbd device
+ self.assertEquals('/dev/nbd0', n._allocate_nbd())
+
+ def test_nbd_allocation_one_in_use(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # Allocate a nbd device, should not be the in use one
+ # TODO(mikal): Note that there is a leak here, as the in use nbd device
+ # is removed from the list, but not returned so it will never be
+ # re-added. I will fix this in a later patch.
+ self.assertEquals('/dev/nbd1', n._allocate_nbd())
+
+ def test_inner_get_dev_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.assertFalse(n._inner_get_dev())
+
+ def test_inner_get_dev_qemu_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always fails
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.startswith('qemu-nbd error'))
+
+ def test_inner_get_dev_qemu_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always passed
+ def fake_trycmd(*args, **kwargs):
+ return '', ''
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.endswith('did not show up'))
+
+ def fake_exists_one(self, path):
+ # We need the pid file for the device which is allocated to exist, but
+ # only once it is allocated to us
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd1/pid':
+ return False
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+ def fake_trycmd_creates_pid(self, *args, **kwargs):
+ def fake_exists_two(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ fake_exists_two))
+ return '', ''
+
+ def test_inner_get_dev_works(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(n._inner_get_dev())
+ self.assertTrue(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals(None, n.device)
+
+ def test_unget_dev_simple(self):
+ # This test is just checking we don't get an exception when we unget
+ # something we don't have
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ n.unget_dev()
+
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+
+ # No error logged, device consumed
+ self.assertTrue(n.get_dev())
+ self.assertTrue(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEquals('', n.error)
+ self.assertEquals(None, n.device)
+
+ def test_get_dev_timeout(self):
+ # Always fail to get a device
+ def fake_get_dev_fails(self):
+ return False
+ self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # No error logged, device consumed
+ self.assertFalse(n.get_dev())
+
+ def test_do_mount_need_to_specify_fs_type(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # communicate a failed mount properly.
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ mount = nbd.NbdMount(imgfile.name, tempdir)
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ mount.get_dev = fake_returns_true
+ mount.map_dev = fake_returns_true
+
+ self.assertFalse(mount.do_mount())
diff --git a/nova/tests/virt/xenapi/__init__.py b/nova/tests/virt/xenapi/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/virt/xenapi/__init__.py
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
new file mode 100644
index 000000000..275088af0
--- /dev/null
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import fixtures
+import mox
+import uuid
+
+from nova import test
+from nova.tests.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi import vm_utils
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class GenerateConfigDriveTestCase(test.TestCase):
+ def test_no_admin_pass(self):
+ # This is here to avoid masking errors, it shouldn't be used normally
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.xenapi.vm_utils.destroy_vdi', _fake_noop))
+
+ # Mocks
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * 1024 * 1024).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(self, instance, content=None, extra_md=None):
+ pass
+
+ def metadata_for_config_drive(self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice')
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
new file mode 100644
index 000000000..7cc5c70da
--- /dev/null
+++ b/nova/tests/virt/xenapi/test_volumeops.py
@@ -0,0 +1,150 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests.xenapi import stubs
+from nova.virt.xenapi import volumeops
+
+
+class VolumeAttachTestCase(test.TestCase):
+ def test_detach_volume_call(self):
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, '_is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.vm_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils._is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref')
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
+ def test_attach_volume_call(self):
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+
+ volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ ops._connect_volume(
+ 'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=True)
+
+ self.mox.ReplayAll()
+ ops.attach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
+ def test_attach_volume_no_hotplug(self):
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(ops, '_connect_volume')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+
+ volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ ops._connect_volume(
+ 'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=False)
+
+ self.mox.ReplayAll()
+ ops.attach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint', hotplug=False)
+
+ def test_connect_volume_no_hotplug(self):
+ session = stubs.FakeSessionForVolumeTests('fake_uri')
+ ops = volumeops.VolumeOps(session)
+ instance_name = 'instance_1'
+ sr_uuid = '1'
+ sr_label = 'Disk-for:%s' % instance_name
+ sr_params = ''
+ sr_ref = 'sr_ref'
+ vdi_uuid = '2'
+ vdi_ref = 'vdi_ref'
+ vbd_ref = 'vbd_ref'
+ connection_data = {'vdi_uuid': vdi_uuid}
+ vm_ref = 'vm_ref'
+ dev_number = 1
+
+ called = {'xenapi': False}
+
+ def fake_call_xenapi(self, *args, **kwargs):
+ # Only used for VBD.plug in this code path.
+ called['xenapi'] = True
+ raise Exception()
+
+ self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
+
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
+ self.mox.StubOutWithMock(
+ volumeops.volume_utils, 'introduce_sr_unless_present')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
+
+ volumeops.volume_utils.parse_sr_info(
+ connection_data, sr_label).AndReturn(
+ tuple([sr_uuid, sr_label, sr_params]))
+
+ volumeops.volume_utils.introduce_sr_unless_present(
+ session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
+
+ volumeops.volume_utils.introduce_vdi(
+ session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
+
+ volumeops.vm_utils.create_vbd(
+ session, vm_ref, vdi_ref, dev_number,
+ bootable=False, osvol=True).AndReturn(vbd_ref)
+
+ self.mox.ReplayAll()
+
+ ops._connect_volume(connection_data, dev_number, instance_name,
+ vm_ref, hotplug=False)
+
+ self.assertEquals(False, called['xenapi'])
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index a469c4706..dd19f4929 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -20,6 +20,7 @@ Stubouts, mocks and fixtures for the test suite
"""
import time
+import uuid
from nova.compute import task_states
from nova.compute import vm_states
@@ -62,7 +63,7 @@ def stub_out_db_instance_api(stubs):
base_options = {
'name': values['name'],
'id': values['id'],
- 'uuid': utils.gen_uuid(),
+ 'uuid': uuid.uuid4(),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py
index 494b201d0..0330246e2 100644
--- a/nova/tests/vmwareapi/stubs.py
+++ b/nova/tests/vmwareapi/stubs.py
@@ -21,31 +21,31 @@ Stubouts for the test suite
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
- """Stubs out the VMWareAPISession's get_vim_object method."""
+ """Stubs out the VMwareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
- """Stubs out the VMWareAPISession's is_vim_object method."""
+ """Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def set_stubs(stubs):
"""Set the stubs."""
- stubs.Set(vmops.VMWareVMOps, 'plug_vifs', fake.fake_plug_vifs)
- stubs.Set(network_utils, 'get_network_with_the_name',
+ stubs.Set(vmops.VMwareVMOps, 'plug_vifs', fake.fake_plug_vifs)
+ stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
- stubs.Set(driver.VMWareAPISession, "_get_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_get_vim_object",
fake_get_vim_object)
- stubs.Set(driver.VMWareAPISession, "_is_vim_object",
+ stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 560e12d70..85c85b5e2 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
@@ -54,7 +54,7 @@ def stubout_instance_snapshot(stubs):
def stubout_session(stubs, cls, product_version=(5, 6, 2),
product_brand='XenServer', **opt_args):
- """Stubs out methods from XenAPISession"""
+ """Stubs out methods from XenAPISession."""
stubs.Set(xenapi_conn.XenAPISession, '_create_session',
lambda s, url: cls(url, **opt_args))
stubs.Set(xenapi_conn.XenAPISession, '_get_product_version_and_brand',
@@ -90,7 +90,7 @@ def stubout_is_vdi_pv(stubs):
def stubout_determine_is_pv_objectstore(stubs):
- """Assumes VMs stu have PV kernels"""
+ """Assumes VMs stubbed out have PV kernels."""
def f(*args):
return False
@@ -98,7 +98,7 @@ def stubout_determine_is_pv_objectstore(stubs):
def stubout_is_snapshot(stubs):
- """ Always returns true
+ """Always returns true
xenapi fake driver does not create vmrefs for snapshots """
def f(*args):
@@ -158,7 +158,7 @@ def _make_fake_vdi():
class FakeSessionForVMTests(fake.SessionBase):
- """ Stubs out a XenAPISession for VM tests """
+ """Stubs out a XenAPISession for VM tests."""
_fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
"Sun Nov 6 22:49:02 2011\n"
@@ -204,7 +204,7 @@ class FakeSessionForVMTests(fake.SessionBase):
class FakeSessionForFirewallTests(FakeSessionForVMTests):
- """ Stubs out a XenApi Session for doing IPTable Firewall tests """
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
@@ -270,7 +270,7 @@ def stub_out_vm_methods(stubs):
class FakeSessionForVolumeTests(fake.SessionBase):
- """ Stubs out a XenAPISession for Volume tests """
+ """Stubs out a XenAPISession for Volume tests."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
@@ -284,7 +284,7 @@ class FakeSessionForVolumeTests(fake.SessionBase):
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
- """ Stubs out a XenAPISession for Volume tests: it injects failures """
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
# This is for testing failure
@@ -352,6 +352,6 @@ class XenAPITestBase(test.TestCase):
def setUp(self):
super(XenAPITestBase, self).setUp()
- self.stub_module('XenAPI', fake)
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
fake.reset()
diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py
index c78d42fd3..6d7f9a624 100644
--- a/nova/tests/xenapi/test_vm_utils.py
+++ b/nova/tests/xenapi/test_vm_utils.py
@@ -1,11 +1,36 @@
import mox
+from nova import context
+from nova import db
from nova import exception
+from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
-import unittest
+
+
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
+def get_fake_dev_params(sr_type):
+ fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
+ 'name_label': 'fake_storage',
+ 'name_description': 'test purposes',
+ 'server': 'myserver',
+ 'serverpath': '/local/scratch/myname',
+ 'sr_type': 'nfs',
+ 'introduce_sr_keys': ['server',
+ 'serverpath',
+ 'sr_type'],
+ 'vdi_uuid': 'falseVDI'},
+ ISCSI_TYPE: {'volume_id': 'fake_volume_id',
+ 'target_lun': 1,
+ 'target_iqn': 'fake_iqn:volume-fake_volume_id',
+ 'target_portal': u'localhost:3260',
+ 'target_discovered': False}, }
+ return fakes[sr_type]
class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
@@ -48,15 +73,8 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
self.assertEquals([], result)
- def test_get_vdis_for_boot_from_vol(self):
- dev_params = {'sr_uuid': 'falseSR',
- 'name_label': 'fake_storage',
- 'name_description': 'test purposes',
- 'server': 'myserver',
- 'serverpath': '/local/scratch/myname',
- 'sr_type': 'nfs',
- 'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
- 'vdi_uuid': 'falseVDI'}
+ def test_get_vdis_for_boot_from_vol_with_sr_uuid(self):
+ dev_params = get_fake_dev_params(XENSM_TYPE)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
@@ -72,20 +90,22 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
return None
self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
- dev_params = {'sr_uuid': 'falseSR',
- 'name_label': 'fake_storage',
- 'name_description': 'test purposes',
- 'server': 'myserver',
- 'serverpath': '/local/scratch/myname',
- 'sr_type': 'nfs',
- 'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
- 'vdi_uuid': 'falseVDI'}
+ dev_params = get_fake_dev_params(XENSM_TYPE)
self.assertRaises(exception.NovaException,
vm_utils.get_vdis_for_boot_from_vol,
driver._session, dev_params)
+ def test_get_vdis_for_boot_from_iscsi_vol_missing_sr_uuid(self):
+ dev_params = get_fake_dev_params(ISCSI_TYPE)
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
-class VMRefOrRaiseVMFoundTestCase(unittest.TestCase):
+ result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
+ dev_params)
+ self.assertNotEquals(result['root']['uuid'], None)
+
+
+class VMRefOrRaiseVMFoundTestCase(test.TestCase):
def test_lookup_call(self):
mock = mox.Mox()
@@ -109,7 +129,7 @@ class VMRefOrRaiseVMFoundTestCase(unittest.TestCase):
mock.VerifyAll()
-class VMRefOrRaiseVMNotFoundTestCase(unittest.TestCase):
+class VMRefOrRaiseVMNotFoundTestCase(test.TestCase):
def test_exception_raised(self):
mock = mox.Mox()
@@ -137,3 +157,50 @@ class VMRefOrRaiseVMNotFoundTestCase(unittest.TestCase):
self.assertTrue(
'somename' in str(e))
mock.VerifyAll()
+
+
+class BittorrentTestCase(stubs.XenAPITestBase):
+ def setUp(self):
+ super(BittorrentTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ def test_image_uses_bittorrent(self):
+ sys_meta = {'image_bittorrent': True}
+ instance = db.instance_create(self.context,
+ {'system_metadata': sys_meta})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.flags(xenapi_torrent_images='some')
+ self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
+ instance))
+
+ def _test_create_image(self, cache_type):
+ sys_meta = {'image_cache_in_nova': True}
+ instance = db.instance_create(self.context,
+ {'system_metadata': sys_meta})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.flags(cache_images=cache_type)
+
+ was = {'called': None}
+
+ def fake_create_cached_image(*args):
+ was['called'] = 'some'
+ return {}
+ self.stubs.Set(vm_utils, '_create_cached_image',
+ fake_create_cached_image)
+
+ def fake_fetch_image(*args):
+ was['called'] = 'none'
+ return {}
+ self.stubs.Set(vm_utils, '_fetch_image',
+ fake_fetch_image)
+
+ vm_utils._create_image(self.context, None, instance,
+ 'foo', 'bar', 'baz')
+
+ self.assertEqual(was['called'], cache_type)
+
+ def test_create_image_cached(self):
+ self._test_create_image('some')
+
+ def test_create_image_uncached(self):
+ self._test_create_image('none')
diff --git a/nova/utils.py b/nova/utils.py
index 398c620f8..115791b64 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -29,7 +29,6 @@ import os
import pyclbr
import random
import re
-import shlex
import shutil
import signal
import socket
@@ -37,31 +36,52 @@ import struct
import sys
import tempfile
import time
-import uuid
-import weakref
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
-from eventlet import semaphore
import netaddr
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+monkey_patch_opts = [
+ cfg.BoolOpt('monkey_patch',
+ default=False,
+ help='Whether to log monkey patching'),
+ cfg.ListOpt('monkey_patch_modules',
+ default=[
+ 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
+ 'nova.compute.api:nova.notifier.api.notify_decorator'
+ ],
+ help='List of modules/decorators to monkey patch'),
+]
+utils_opts = [
+ cfg.IntOpt('password_length',
+ default=12,
+ help='Length of generated instance admin passwords'),
+ cfg.BoolOpt('disable_process_locking',
+ default=False,
+ help='Whether to disable inter-process locks'),
+ cfg.StrOpt('instance_usage_audit_period',
+ default='month',
+ help='time period to generate instance usages for. '
+ 'Time period must be hour, day, month or year'),
+ cfg.StrOpt('rootwrap_config',
+ default="/etc/nova/rootwrap.conf",
+ help='Path to the rootwrap configuration file to use for '
+ 'running commands as root'),
+]
+CONF = cfg.CONF
+CONF.register_opts(monkey_patch_opts)
+CONF.register_opts(utils_opts)
LOG = logging.getLogger(__name__)
-CONF = config.CONF
-CONF.register_opt(
- cfg.BoolOpt('disable_process_locking', default=False,
- help='Whether to disable inter-process locks'))
# Used for looking up extensions of text
# to their 'multiplied' byte amount
@@ -180,12 +200,20 @@ def execute(*cmd, **kwargs):
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
+
+ if os.name == 'nt':
+ preexec_fn = None
+ close_fds = False
+ else:
+ preexec_fn = _subprocess_setup
+ close_fds = True
+
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
- close_fds=True,
- preexec_fn=_subprocess_setup,
+ close_fds=close_fds,
+ preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
@@ -401,7 +429,7 @@ def last_completed_audit_period(unit=None, before=None):
return (begin, end)
-def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
+def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
@@ -410,6 +438,9 @@ def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
Believed to be reasonably secure (with a reasonable password length!)
"""
+ if length is None:
+ length = CONF.password_length
+
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
@@ -521,12 +552,23 @@ class LoopingCallDone(Exception):
self.retvalue = retvalue
-class LoopingCall(object):
+class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
+ self.done = None
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+ """A looping call which happens at a fixed interval."""
def start(self, interval, initial_delay=None):
self._running = True
@@ -546,7 +588,7 @@ class LoopingCall(object):
self.stop()
done.send(e.retvalue)
except Exception:
- LOG.exception(_('in looping call'))
+ LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
@@ -557,11 +599,47 @@ class LoopingCall(object):
greenthread.spawn(_inner)
return self.done
- def stop(self):
- self._running = False
- def wait(self):
- return self.done.wait()
+class DynamicLoopingCall(LoopingCallBase):
+ """A looping call which sleeps until the next known event.
+
+ The function called should return how long to sleep for before being
+ called again.
+ """
+
+ def start(self, initial_delay=None, periodic_interval_max=None):
+ self._running = True
+ done = event.Event()
+
+ def _inner():
+ if initial_delay:
+ greenthread.sleep(initial_delay)
+
+ try:
+ while self._running:
+ idle = self.f(*self.args, **self.kw)
+ if not self._running:
+ break
+
+ if not periodic_interval_max is None:
+ idle = min(idle, periodic_interval_max)
+ LOG.debug(_('Periodic task processor sleeping for %.02f '
+ 'seconds'), idle)
+ greenthread.sleep(idle)
+ except LoopingCallDone, e:
+ self.stop()
+ done.send(e.retvalue)
+ except Exception:
+ LOG.exception(_('in dynamic looping call'))
+ done.send_exception(*sys.exc_info())
+ return
+ else:
+ done.send(True)
+
+ self.done = done
+
+ greenthread.spawn(_inner)
+ return self.done
def xhtml_escape(value):
@@ -613,7 +691,7 @@ def to_bytes(text, default=0):
def delete_if_exists(pathname):
- """delete a file, but ignore file not found error"""
+ """Delete a file, but ignore file not found error."""
try:
os.unlink(pathname)
@@ -768,12 +846,8 @@ def parse_server_string(server_str):
return ('', '')
-def gen_uuid():
- return uuid.uuid4()
-
-
def bool_from_str(val):
- """Convert a string representation of a bool into a bool value"""
+ """Convert a string representation of a bool into a bool value."""
if not val:
return False
@@ -786,7 +860,7 @@ def bool_from_str(val):
def is_valid_boolstr(val):
- """Check if the provided string is a valid bool string or not. """
+ """Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return val == 'true' or val == 'false' or \
val == 'yes' or val == 'no' or \
@@ -835,7 +909,7 @@ def is_valid_cidr(address):
def monkey_patch():
- """ If the Flags.monkey_patch set as True,
+ """If the Flags.monkey_patch is set to True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
@@ -875,7 +949,7 @@ def monkey_patch():
def convert_to_list_dict(lst, label):
- """Convert a value or list into a list of dicts"""
+ """Convert a value or list into a list of dicts."""
if not lst:
return None
if not isinstance(lst, list):
@@ -884,7 +958,7 @@ def convert_to_list_dict(lst, label):
def timefunc(func):
- """Decorator that logs how long a particular function took to execute"""
+ """Decorator that logs how long a particular function took to execute."""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
@@ -897,18 +971,6 @@ def timefunc(func):
return inner
-def generate_glance_url():
- """Generate the URL to glance."""
- # TODO(jk0): This will eventually need to take SSL into consideration
- # when supported in glance.
- return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
-
-
-def generate_image_url(image_ref):
- """Generate an image URL from an image_ref."""
- return "%s/images/%s" % (generate_glance_url(), image_ref)
-
-
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
@@ -937,7 +999,7 @@ def make_dev_path(dev, partition=None, base='/dev'):
def total_seconds(td):
- """Local total_seconds implementation for compatibility with python 2.6"""
+ """Local total_seconds implementation for compatibility with python 2.6."""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
@@ -1027,14 +1089,6 @@ def temporary_mutation(obj, **kwargs):
setattr(obj, attr, old_value)
-def service_is_up(service):
- """Check whether a service is up based on last heartbeat."""
- last_heartbeat = service['updated_at'] or service['created_at']
- # Timestamps in DB are UTC.
- elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
- return abs(elapsed) <= CONF.service_down_time
-
-
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
@@ -1112,7 +1166,7 @@ def strcmp_const_time(s1, s2):
def walk_class_hierarchy(clazz, encountered=None):
- """Walk class hierarchy, yielding most derived classes first"""
+ """Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
@@ -1160,17 +1214,38 @@ def mkfs(fs, path, label=None):
:param label: Volume label to use
"""
if fs == 'swap':
- execute('mkswap', path)
+ args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
- #add -F to force no interactive excute on non-block device.
- if fs in ('ext3', 'ext4'):
- args.extend(['-F'])
- if label:
- if fs in ('msdos', 'vfat'):
- label_opt = '-n'
- else:
- label_opt = '-L'
- args.extend([label_opt, label])
- args.append(path)
- execute(*args)
+ #add -F to force no interactive execute on non-block device.
+ if fs in ('ext3', 'ext4'):
+ args.extend(['-F'])
+ if label:
+ if fs in ('msdos', 'vfat'):
+ label_opt = '-n'
+ else:
+ label_opt = '-L'
+ args.extend([label_opt, label])
+ args.append(path)
+ execute(*args)
+
+
+def last_bytes(file_like_object, num):
+ """Return num bytes from the end of the file, and remaining byte count.
+
+ :param file_like_object: The file to read
+ :param num: The number of bytes to return
+
+ :returns (data, remaining)
+ """
+
+ try:
+ file_like_object.seek(-num, os.SEEK_END)
+ except IOError, e:
+ if e.errno == 22:
+ file_like_object.seek(0, os.SEEK_SET)
+ else:
+ raise
+
+ remaining = file_like_object.tell()
+ return (file_like_object.read(), remaining)
diff --git a/nova/version.py b/nova/version.py
index ad7670849..82f3bb970 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -14,10 +14,72 @@
# License for the specific language governing permissions and limitations
# under the License.
+NOVA_VENDOR = "OpenStack Foundation"
+NOVA_PRODUCT = "OpenStack Nova"
+NOVA_PACKAGE = None # OS distro package version suffix
NOVA_VERSION = ['2013', '1', None]
YEAR, COUNT, REVISION = NOVA_VERSION
FINAL = False # This becomes true at Release Candidate time
+loaded = False
+
+
+def _load_config():
+ # Don't load in global context, since we can't assume
+ # these modules are accessible when distutils uses
+ # this module
+ import ConfigParser
+
+ from nova.openstack.common import cfg
+ from nova.openstack.common import log as logging
+
+ global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
+ if loaded:
+ return
+
+ loaded = True
+
+ cfgfile = cfg.CONF.find_file("release")
+ if cfgfile is None:
+ return
+
+ try:
+ cfg = ConfigParser.RawConfigParser()
+ cfg.read(cfgfile)
+
+ NOVA_VENDOR = cfg.get("Nova", "vendor")
+ if cfg.has_option("Nova", "vendor"):
+ NOVA_VENDOR = cfg.get("Nova", "vendor")
+
+ NOVA_PRODUCT = cfg.get("Nova", "product")
+ if cfg.has_option("Nova", "product"):
+ NOVA_PRODUCT = cfg.get("Nova", "product")
+
+ NOVA_PACKAGE = cfg.get("Nova", "package")
+ if cfg.has_option("Nova", "package"):
+ NOVA_PACKAGE = cfg.get("Nova", "package")
+ except Exception, ex:
+ LOG = logging.getLogger(__name__)
+ LOG.error("Failed to load %(cfgfile)s: %(ex)s" % locals())
+
+
+def vendor_string():
+ _load_config()
+
+ return NOVA_VENDOR
+
+
+def product_string():
+ _load_config()
+
+ return NOVA_PRODUCT
+
+
+def package_string():
+ _load_config()
+
+ return NOVA_PACKAGE
+
def canonical_version_string():
return '.'.join(filter(None, NOVA_VERSION))
@@ -30,9 +92,8 @@ def version_string():
return '%s-dev' % (canonical_version_string(),)
-def vcs_version_string():
- return 'LOCALBRANCH:LOCALREVISION'
-
-
-def version_string_with_vcs():
- return '%s-%s' % (canonical_version_string(), vcs_version_string())
+def version_string_with_package():
+ if package_string() is None:
+ return canonical_version_string()
+ else:
+ return "%s-%s" % (canonical_version_string(), package_string())
diff --git a/nova/virt/baremetal/__init__.py b/nova/virt/baremetal/__init__.py
index 520feecbd..e3ecef821 100644
--- a/nova/virt/baremetal/__init__.py
+++ b/nova/virt/baremetal/__init__.py
@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,6 +12,4 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-# NOTE(sdague) for more convenient compute_driver names
from nova.virt.baremetal.driver import BareMetalDriver
diff --git a/nova/virt/baremetal/baremetal_states.py b/nova/virt/baremetal/baremetal_states.py
new file mode 100644
index 000000000..28a41ab47
--- /dev/null
+++ b/nova/virt/baremetal/baremetal_states.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Possible baremetal node states for instances.
+
+Compute instance baremetal states represent the state of an instance as it
+pertains to a user or administrator. When combined with task states
+(task_states.py), a better picture can be formed regarding the instance's
+health.
+
+"""
+
+ACTIVE = 'active'
+BUILDING = 'building'
+DELETED = 'deleted'
+ERROR = 'error'
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
new file mode 100644
index 000000000..8cd9e9b3c
--- /dev/null
+++ b/nova/virt/baremetal/base.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 University of Southern California / ISI
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.baremetal import baremetal_states
+
+
+class NodeDriver(object):
+
+ def __init__(self):
+ pass
+
+ def cache_images(self, context, node, instance, **kwargs):
+ raise NotImplementedError()
+
+ def destroy_images(self, context, node, instance):
+ raise NotImplementedError()
+
+ def activate_bootloader(self, context, node, instance):
+ raise NotImplementedError()
+
+ def deactivate_bootloader(self, context, node, instance):
+ raise NotImplementedError()
+
+ def activate_node(self, context, node, instance):
+ """For operations after power on."""
+ raise NotImplementedError()
+
+ def deactivate_node(self, context, node, instance):
+ """For operations before power off."""
+ raise NotImplementedError()
+
+ def get_console_output(self, node, instance):
+ raise NotImplementedError()
+
+
+class PowerManager(object):
+
+ def __init__(self, **kwargs):
+ self.state = baremetal_states.DELETED
+ pass
+
+ def activate_node(self):
+ self.state = baremetal_states.ACTIVE
+ return self.state
+
+ def reboot_node(self):
+ self.state = baremetal_states.ACTIVE
+ return self.state
+
+ def deactivate_node(self):
+ self.state = baremetal_states.DELETED
+ return self.state
+
+ def is_power_on(self):
+ """Returns True or False according to the node's power state."""
+ return True
+
+ # TODO(NTTdocomo): split out console methods to its own class
+ def start_console(self):
+ pass
+
+ def stop_console(self):
+ pass
diff --git a/nova/virt/baremetal/db/__init__.py b/nova/virt/baremetal/db/__init__.py
new file mode 100644
index 000000000..ad883f505
--- /dev/null
+++ b/nova/virt/baremetal/db/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.baremetal.db.api import *
diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py
new file mode 100644
index 000000000..206a59b4f
--- /dev/null
+++ b/nova/virt/baremetal/db/api.py
@@ -0,0 +1,181 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Defines interface for DB access.
+
+The underlying driver is loaded as a :class:`LazyPluggable`.
+
+Functions in this module are imported into the nova.virt.baremetal.db
+namespace. Call these functions from nova.virt.baremetal.db namespace, not
+the nova.virt.baremetal.db.api namespace.
+
+All functions in this module return objects that implement a dictionary-like
+interface. Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface. However, a future goal is to have all of
+these objects be simple dictionaries.
+
+
+**Related Flags**
+
+:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
+ `sqlalchemy` is the only supported backend right now.
+
+:[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to
+ use, like: `sqlite:///var/lib/nova/nova.sqlite`.
+
+"""
+
+from nova.openstack.common import cfg
+from nova import utils
+
+
+# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
+# because utils.LazyPluggable doesn't support reading from
+# option groups. See bug #1093043.
+db_opts = [
+ cfg.StrOpt('baremetal_db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+
+IMPL = utils.LazyPluggable(
+ 'baremetal_db_backend',
+ sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
+
+
+def bm_node_get_all(context, service_host=None):
+ return IMPL.bm_node_get_all(context,
+ service_host=service_host)
+
+
+def bm_node_find_free(context, service_host=None,
+ memory_mb=None, cpus=None, local_gb=None):
+ return IMPL.bm_node_find_free(context,
+ service_host=service_host,
+ memory_mb=memory_mb,
+ cpus=cpus,
+ local_gb=local_gb)
+
+
+def bm_node_get(context, bm_node_id):
+ return IMPL.bm_node_get(context, bm_node_id)
+
+
+def bm_node_get_by_instance_uuid(context, instance_uuid):
+ return IMPL.bm_node_get_by_instance_uuid(context,
+ instance_uuid)
+
+
+def bm_node_create(context, values):
+ return IMPL.bm_node_create(context, values)
+
+
+def bm_node_destroy(context, bm_node_id):
+ return IMPL.bm_node_destroy(context, bm_node_id)
+
+
+def bm_node_update(context, bm_node_id, values):
+ return IMPL.bm_node_update(context, bm_node_id, values)
+
+
+def bm_node_set_uuid_safe(context, bm_node_id, uuid):
+ return IMPL.bm_node_set_uuid_safe(context, bm_node_id, uuid)
+
+
+def bm_pxe_ip_create(context, address, server_address):
+ return IMPL.bm_pxe_ip_create(context, address, server_address)
+
+
+def bm_pxe_ip_create_direct(context, bm_pxe_ip):
+ return IMPL.bm_pxe_ip_create_direct(context, bm_pxe_ip)
+
+
+def bm_pxe_ip_destroy(context, ip_id):
+ return IMPL.bm_pxe_ip_destroy(context, ip_id)
+
+
+def bm_pxe_ip_destroy_by_address(context, address):
+ return IMPL.bm_pxe_ip_destroy_by_address(context, address)
+
+
+def bm_pxe_ip_get_all(context):
+ return IMPL.bm_pxe_ip_get_all(context)
+
+
+def bm_pxe_ip_get(context, ip_id):
+ return IMPL.bm_pxe_ip_get(context, ip_id)
+
+
+def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
+ return IMPL.bm_pxe_ip_get_by_bm_node_id(context, bm_node_id)
+
+
+def bm_pxe_ip_associate(context, bm_node_id):
+ return IMPL.bm_pxe_ip_associate(context, bm_node_id)
+
+
+def bm_pxe_ip_disassociate(context, bm_node_id):
+ return IMPL.bm_pxe_ip_disassociate(context, bm_node_id)
+
+
+def bm_interface_get(context, if_id):
+ return IMPL.bm_interface_get(context, if_id)
+
+
+def bm_interface_get_all(context):
+ return IMPL.bm_interface_get_all(context)
+
+
+def bm_interface_destroy(context, if_id):
+ return IMPL.bm_interface_destroy(context, if_id)
+
+
+def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
+ return IMPL.bm_interface_create(context, bm_node_id, address,
+ datapath_id, port_no)
+
+
+def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
+ return IMPL.bm_interface_set_vif_uuid(context, if_id, vif_uuid)
+
+
+def bm_interface_get_by_vif_uuid(context, vif_uuid):
+ return IMPL.bm_interface_get_by_vif_uuid(context, vif_uuid)
+
+
+def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
+ return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id)
+
+
+def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
+ swap_mb):
+ return IMPL.bm_deployment_create(context, key, image_path,
+ pxe_config_path, root_mb, swap_mb)
+
+
+def bm_deployment_get(context, dep_id):
+ return IMPL.bm_deployment_get(context, dep_id)
+
+
+def bm_deployment_destroy(context, dep_id):
+ return IMPL.bm_deployment_destroy(context, dep_id)
diff --git a/nova/virt/baremetal/db/migration.py b/nova/virt/baremetal/db/migration.py
new file mode 100644
index 000000000..40631bf45
--- /dev/null
+++ b/nova/virt/baremetal/db/migration.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Database setup and migration commands."""
+
+from nova import utils
+
+
+IMPL = utils.LazyPluggable(
+ 'baremetal_db_backend',
+ sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration')
+
+INIT_VERSION = 0
+
+
+def db_sync(version=None):
+ """Migrate the database to `version` or the most recent version."""
+ return IMPL.db_sync(version=version)
+
+
+def db_version():
+ """Display the current database version."""
+ return IMPL.db_version()
diff --git a/nova/virt/baremetal/db/sqlalchemy/__init__.py b/nova/virt/baremetal/db/sqlalchemy/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
new file mode 100644
index 000000000..e2240053c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -0,0 +1,419 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of SQLAlchemy backend."""
+
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import literal_column
+
+from nova.db.sqlalchemy.api import is_user_context
+from nova.db.sqlalchemy.api import require_admin_context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
+from nova.virt.baremetal.db.sqlalchemy import models
+from nova.virt.baremetal.db.sqlalchemy.session import get_session
+
+LOG = logging.getLogger(__name__)
+
+
+def model_query(context, *args, **kwargs):
+ """Query helper that accounts for context's `read_deleted` field.
+
+ :param context: context to query under
+ :param session: if present, the session to use
+ :param read_deleted: if present, overrides context's read_deleted field.
+ :param project_only: if present and context is user-type, then restrict
+ query to match the context's project_id.
+ """
+ session = kwargs.get('session') or get_session()
+ read_deleted = kwargs.get('read_deleted') or context.read_deleted
+ project_only = kwargs.get('project_only')
+
+ query = session.query(*args)
+
+ if read_deleted == 'no':
+ query = query.filter_by(deleted=False)
+ elif read_deleted == 'yes':
+ pass # omit the filter to include deleted and active
+ elif read_deleted == 'only':
+ query = query.filter_by(deleted=True)
+ else:
+ raise Exception(
+ _("Unrecognized read_deleted value '%s'") % read_deleted)
+
+ if project_only and is_user_context(context):
+ query = query.filter_by(project_id=context.project_id)
+
+ return query
+
+
+def _save(ref, session=None):
+ if not session:
+ session = get_session()
+ # We must not call ref.save() with session=None, otherwise NovaBase
+ # uses nova-db's session, which cannot access bm-db.
+ ref.save(session=session)
+
+
+def _build_node_order_by(query):
+ query = query.order_by(asc(models.BareMetalNode.memory_mb))
+ query = query.order_by(asc(models.BareMetalNode.cpus))
+ query = query.order_by(asc(models.BareMetalNode.local_gb))
+ return query
+
+
+@require_admin_context
+def bm_node_get_all(context, service_host=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no")
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ return query.all()
+
+
+@require_admin_context
+def bm_node_find_free(context, service_host=None,
+ cpus=None, memory_mb=None, local_gb=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no")
+ query = query.filter(models.BareMetalNode.instance_uuid == None)
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ if cpus is not None:
+ query = query.filter(models.BareMetalNode.cpus >= cpus)
+ if memory_mb is not None:
+ query = query.filter(models.BareMetalNode.memory_mb >= memory_mb)
+ if local_gb is not None:
+ query = query.filter(models.BareMetalNode.local_gb >= local_gb)
+ query = _build_node_order_by(query)
+ return query.first()
+
+
+@require_admin_context
+def bm_node_get(context, bm_node_id):
+ # bm_node_id may be passed as a string. Convert to INT to improve DB perf.
+ bm_node_id = int(bm_node_id)
+ result = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(id=bm_node_id).\
+ first()
+
+ if not result:
+ raise exception.InstanceNotFound(instance_id=bm_node_id)
+
+ return result
+
+
+@require_admin_context
+def bm_node_get_by_instance_uuid(context, instance_uuid):
+ if not uuidutils.is_uuid_like(instance_uuid):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+ result = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(instance_uuid=instance_uuid).\
+ first()
+
+ if not result:
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+ return result
+
+
+@require_admin_context
+def bm_node_create(context, values):
+ bm_node_ref = models.BareMetalNode()
+ bm_node_ref.update(values)
+ _save(bm_node_ref)
+ return bm_node_ref
+
+
+@require_admin_context
+def bm_node_update(context, bm_node_id, values):
+ model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(id=bm_node_id).\
+ update(values)
+
+
+@require_admin_context
+def bm_node_set_uuid_safe(context, bm_node_id, values):
+ """Associate an instance to a node safely
+
+ Associate an instance to a node only if that node is not yet associated.
+ Allow the caller to set any other fields they require in the same
+ operation. For example, this is used to set the node's task_state to
+ BUILDING at the beginning of driver.spawn().
+
+ """
+ if 'instance_uuid' not in values:
+ raise exception.NovaException(_(
+ "instance_uuid must be supplied to bm_node_set_uuid_safe"))
+
+ session = get_session()
+ with session.begin():
+ query = model_query(context, models.BareMetalNode,
+ session=session, read_deleted="no").\
+ filter_by(id=bm_node_id)
+
+ count = query.filter_by(instance_uuid=None).\
+ update(values, synchronize_session=False)
+ if count != 1:
+ raise exception.NovaException(_(
+ "Failed to associate instance %(uuid)s to baremetal node "
+ "%(id)s.") % {'id': bm_node_id,
+ 'uuid': values['instance_uuid']})
+ ref = query.first()
+ return ref
+
+
+@require_admin_context
+def bm_node_destroy(context, bm_node_id):
+ model_query(context, models.BareMetalNode).\
+ filter_by(id=bm_node_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def bm_pxe_ip_get_all(context):
+ query = model_query(context, models.BareMetalPxeIp, read_deleted="no")
+ return query.all()
+
+
+@require_admin_context
+def bm_pxe_ip_create(context, address, server_address):
+ ref = models.BareMetalPxeIp()
+ ref.address = address
+ ref.server_address = server_address
+ _save(ref)
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_create_direct(context, bm_pxe_ip):
+ ref = bm_pxe_ip_create(context,
+ address=bm_pxe_ip['address'],
+ server_address=bm_pxe_ip['server_address'])
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_destroy(context, ip_id):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(id=ip_id).\
+ delete()
+
+
+@require_admin_context
+def bm_pxe_ip_destroy_by_address(context, address):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(address=address).\
+ delete()
+
+
+@require_admin_context
+def bm_pxe_ip_get(context, ip_id):
+ result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(id=ip_id).\
+ first()
+
+ return result
+
+
+@require_admin_context
+def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
+ result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ first()
+
+ if not result:
+ raise exception.InstanceNotFound(instance_id=bm_node_id)
+
+ return result
+
+
+@require_admin_context
+def bm_pxe_ip_associate(context, bm_node_id):
+ session = get_session()
+ with session.begin():
+ # Check if the node really exists
+ node_ref = model_query(context, models.BareMetalNode,
+ read_deleted="no", session=session).\
+ filter_by(id=bm_node_id).\
+ first()
+ if not node_ref:
+ raise exception.InstanceNotFound(instance_id=bm_node_id)
+
+ # Check if the node already has a pxe_ip
+ ip_ref = model_query(context, models.BareMetalPxeIp,
+ read_deleted="no", session=session).\
+ filter_by(bm_node_id=bm_node_id).\
+ first()
+ if ip_ref:
+ return ip_ref.id
+
+ # with_lockmode('update') and filter_by(bm_node_id=None) will lock all
+ # records. It may cause a performance problem in high-concurrency
+ # environment.
+ ip_ref = model_query(context, models.BareMetalPxeIp,
+ read_deleted="no", session=session).\
+ filter_by(bm_node_id=None).\
+ with_lockmode('update').\
+ first()
+
+ # this exception is not caught in nova/compute/manager
+ if not ip_ref:
+ raise exception.NovaException(_("No more PXE IPs available"))
+
+ ip_ref.bm_node_id = bm_node_id
+ session.add(ip_ref)
+ return ip_ref.id
+
+
+@require_admin_context
+def bm_pxe_ip_disassociate(context, bm_node_id):
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ update({'bm_node_id': None})
+
+
+@require_admin_context
+def bm_interface_get(context, if_id):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(id=if_id).\
+ first()
+
+ if not result:
+ raise exception.NovaException(_("Baremetal interface %s "
+ "not found") % if_id)
+
+ return result
+
+
+@require_admin_context
+def bm_interface_get_all(context):
+ query = model_query(context, models.BareMetalInterface,
+ read_deleted="no")
+ return query.all()
+
+
+@require_admin_context
+def bm_interface_destroy(context, if_id):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalInterface, read_deleted="no").\
+ filter_by(id=if_id).\
+ delete()
+
+
+@require_admin_context
+def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
+ ref = models.BareMetalInterface()
+ ref.bm_node_id = bm_node_id
+ ref.address = address
+ ref.datapath_id = datapath_id
+ ref.port_no = port_no
+ _save(ref)
+ return ref.id
+
+
+@require_admin_context
+def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
+ session = get_session()
+ with session.begin():
+ bm_interface = model_query(context, models.BareMetalInterface,
+ read_deleted="no", session=session).\
+ filter_by(id=if_id).\
+ with_lockmode('update').\
+ first()
+ if not bm_interface:
+ raise exception.NovaException(_("Baremetal interface %s "
+ "not found") % if_id)
+
+ bm_interface.vif_uuid = vif_uuid
+ try:
+ session.add(bm_interface)
+ session.flush()
+ except exception.DBError, e:
+ # TODO(deva): clean up when db layer raises DuplicateKeyError
+ if str(e).find('IntegrityError') != -1:
+ raise exception.NovaException(_("Baremetal interface %s "
+ "already in use") % vif_uuid)
+ else:
+ raise e
+
+
+@require_admin_context
+def bm_interface_get_by_vif_uuid(context, vif_uuid):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(vif_uuid=vif_uuid).\
+ first()
+
+ if not result:
+ raise exception.NovaException(_("Baremetal virtual interface %s "
+ "not found") % vif_uuid)
+
+ return result
+
+
+@require_admin_context
+def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ all()
+
+ if not result:
+ raise exception.InstanceNotFound(instance_id=bm_node_id)
+
+ return result
+
+
+@require_admin_context
+def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
+ swap_mb):
+ ref = models.BareMetalDeployment()
+ ref.key = key
+ ref.image_path = image_path
+ ref.pxe_config_path = pxe_config_path
+ ref.root_mb = root_mb
+ ref.swap_mb = swap_mb
+ _save(ref)
+ return ref.id
+
+
+@require_admin_context
+def bm_deployment_get(context, dep_id):
+ result = model_query(context, models.BareMetalDeployment,
+ read_deleted="no").\
+ filter_by(id=dep_id).\
+ first()
+ return result
+
+
+@require_admin_context
+def bm_deployment_destroy(context, dep_id):
+ model_query(context, models.BareMetalDeployment).\
+ filter_by(id=dep_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg
new file mode 100644
index 000000000..368e93a52
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg
@@ -0,0 +1,20 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=nova_bm
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
new file mode 100644
index 000000000..2d899406c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
@@ -0,0 +1,123 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import Index, Integer, MetaData, String, Table
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ bm_nodes = Table('bm_nodes', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('cpus', Integer),
+ Column('memory_mb', Integer),
+ Column('local_gb', Integer),
+ Column('pm_address', String(length=255)),
+ Column('pm_user', String(length=255)),
+ Column('pm_password', String(length=255)),
+ Column('service_host', String(length=255)),
+ Column('prov_mac_address', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('registration_status', String(length=16)),
+ Column('task_state', String(length=255)),
+ Column('prov_vlan_id', Integer),
+ Column('terminal_port', Integer),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_interfaces = Table('bm_interfaces', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('bm_node_id', Integer),
+ Column('address', String(length=255), unique=True),
+ Column('datapath_id', String(length=255)),
+ Column('port_no', Integer),
+ Column('vif_uuid', String(length=36), unique=True),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_pxe_ips = Table('bm_pxe_ips', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('address', String(length=255), unique=True),
+ Column('bm_node_id', Integer),
+ Column('server_address', String(length=255), unique=True),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_deployments = Table('bm_deployments', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('bm_node_id', Integer),
+ Column('key', String(length=255)),
+ Column('image_path', String(length=255)),
+ Column('pxe_config_path', String(length=255)),
+ Column('root_mb', Integer),
+ Column('swap_mb', Integer),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_nodes.create()
+ bm_interfaces.create()
+ bm_pxe_ips.create()
+ bm_deployments.create()
+
+ Index('idx_bm_nodes_service_host_deleted',
+ bm_nodes.c.service_host, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+ Index('idx_bm_nodes_instance_uuid_deleted',
+ bm_nodes.c.instance_uuid, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+ Index('idx_bm_nodes_hmcld',
+ bm_nodes.c.service_host, bm_nodes.c.memory_mb, bm_nodes.c.cpus,
+ bm_nodes.c.local_gb, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+
+ Index('idx_bm_interfaces_bm_node_id_deleted',
+ bm_interfaces.c.bm_node_id, bm_interfaces.c.deleted)\
+ .create(migrate_engine)
+
+ Index('idx_bm_pxe_ips_bm_node_id_deleted',
+ bm_pxe_ips.c.bm_node_id, bm_pxe_ips.c.deleted)\
+ .create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ pass
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/migration.py b/nova/virt/baremetal/db/sqlalchemy/migration.py
new file mode 100644
index 000000000..929793e70
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migration.py
@@ -0,0 +1,113 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import distutils.version as dist_version
+import migrate
+from migrate.versioning import util as migrate_util
+import os
+import sqlalchemy
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.baremetal.db import migration
+from nova.virt.baremetal.db.sqlalchemy.session import get_engine
+
+LOG = logging.getLogger(__name__)
+
+
+@migrate_util.decorator
+def patched_with_engine(f, *a, **kw):
+ url = a[0]
+ engine = migrate_util.construct_engine(url, **kw)
+
+ try:
+ kw['engine'] = engine
+ return f(*a, **kw)
+ finally:
+ if isinstance(engine, migrate_util.Engine) and engine is not url:
+ migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
+ engine.dispose()
+
+
+# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
+# on that version or higher, this can be removed
+MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
+if (not hasattr(migrate, '__version__') or
+ dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+ migrate_util.with_engine = patched_with_engine
+
+
+# NOTE(jkoelker) Delay importing migrate until we are patched
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
+
+
+_REPOSITORY = None
+
+
+def db_sync(version=None):
+ if version is not None:
+ try:
+ version = int(version)
+ except ValueError:
+ raise exception.NovaException(_("version should be an integer"))
+
+ current_version = db_version()
+ repository = _find_migrate_repo()
+ if version is None or version > current_version:
+ return versioning_api.upgrade(get_engine(), repository, version)
+ else:
+ return versioning_api.downgrade(get_engine(), repository,
+ version)
+
+
+def db_version():
+ repository = _find_migrate_repo()
+ try:
+ return versioning_api.db_version(get_engine(), repository)
+ except versioning_exceptions.DatabaseNotControlledError:
+ meta = sqlalchemy.MetaData()
+ engine = get_engine()
+ meta.reflect(bind=engine)
+ tables = meta.tables
+ if len(tables) == 0:
+ db_version_control(migration.INIT_VERSION)
+ return versioning_api.db_version(get_engine(), repository)
+ else:
+ # Some pre-Essex DB's may not be version controlled.
+ # Require them to upgrade using Essex first.
+ raise exception.NovaException(
+ _("Upgrade DB using Essex release first."))
+
+
+def db_version_control(version=None):
+ repository = _find_migrate_repo()
+ versioning_api.version_control(get_engine(), repository, version)
+ return version
+
+
+def _find_migrate_repo():
+ """Get the path for the migrate repository."""
+ global _REPOSITORY
+ path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'migrate_repo')
+ assert os.path.exists(path)
+ if _REPOSITORY is None:
+ _REPOSITORY = Repository(path)
+ return _REPOSITORY
diff --git a/nova/virt/baremetal/db/sqlalchemy/models.py b/nova/virt/baremetal/db/sqlalchemy/models.py
new file mode 100644
index 000000000..7f9ffb901
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/models.py
@@ -0,0 +1,77 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SQLAlchemy models for baremetal data.
+"""
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import ForeignKey, Text
+
+from nova.db.sqlalchemy import models
+
+
+BASE = declarative_base()
+
+
+class BareMetalNode(BASE, models.NovaBase):
+ """Represents a bare metal node."""
+
+ __tablename__ = 'bm_nodes'
+ id = Column(Integer, primary_key=True)
+ service_host = Column(String(255))
+ instance_uuid = Column(String(36), nullable=True)
+ cpus = Column(Integer)
+ memory_mb = Column(Integer)
+ local_gb = Column(Integer)
+ pm_address = Column(Text)
+ pm_user = Column(Text)
+ pm_password = Column(Text)
+ prov_mac_address = Column(Text)
+ registration_status = Column(String(16))
+ task_state = Column(String(255))
+ prov_vlan_id = Column(Integer)
+ terminal_port = Column(Integer)
+
+
+class BareMetalPxeIp(BASE, models.NovaBase):
+ __tablename__ = 'bm_pxe_ips'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255), unique=True)
+ server_address = Column(String(255), unique=True)
+ bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'), nullable=True)
+
+
+class BareMetalInterface(BASE, models.NovaBase):
+ __tablename__ = 'bm_interfaces'
+ id = Column(Integer, primary_key=True)
+ bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'), nullable=True)
+ address = Column(String(255), unique=True)
+ datapath_id = Column(String(255))
+ port_no = Column(Integer)
+ vif_uuid = Column(String(36), unique=True)
+
+
+class BareMetalDeployment(BASE, models.NovaBase):
+ __tablename__ = 'bm_deployments'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ image_path = Column(String(255))
+ pxe_config_path = Column(String(255))
+ root_mb = Column(Integer)
+ swap_mb = Column(Integer)
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
new file mode 100644
index 000000000..fcaf210a5
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Session Handling for SQLAlchemy backend."""
+
+from nova.db.sqlalchemy import session as nova_session
+from nova.openstack.common import cfg
+from nova import paths
+
+opts = [
+ cfg.StrOpt('sql_connection',
+ default=('sqlite:///' +
+ paths.state_path_def('baremetal_$sqlite_db')),
+ help='The SQLAlchemy connection string used to connect to the '
+ 'bare-metal database'),
+ ]
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+
+CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
+
+_ENGINE = None
+_MAKER = None
+
+
+def get_session(autocommit=True, expire_on_commit=False):
+ """Return a SQLAlchemy session."""
+ global _MAKER
+
+ if _MAKER is None:
+ engine = get_engine()
+ _MAKER = nova_session.get_maker(engine, autocommit, expire_on_commit)
+
+ session = _MAKER()
+ return session
+
+
+def get_engine():
+ """Return a SQLAlchemy engine."""
+ global _ENGINE
+ if _ENGINE is None:
+ _ENGINE = nova_session.create_engine(CONF.baremetal.sql_connection)
+ return _ENGINE
diff --git a/nova/virt/baremetal/doc/README.rst b/nova/virt/baremetal/doc/README.rst
new file mode 100644
index 000000000..6d5cfd466
--- /dev/null
+++ b/nova/virt/baremetal/doc/README.rst
@@ -0,0 +1,69 @@
+General Bare-metal Provisioning README
+======================================
+
+:Authors:
+ [USC/ISI] Mikyung Kang <mkkang@isi.edu>, David Kang <dkang@isi.edu>
+
+ [NTT DOCOMO] Ken Igarashi <igarashik@nttdocomo.co.jp>
+
+ [VirtualTech Japan Inc.] Arata Notsu <notsu@virtualtech.jp>
+:Date: 2012-08-02
+:Version: 2012.8
+:Wiki: http://wiki.openstack.org/GeneralBareMetalProvisioningFramework
+
+Code changes
+------------
+
+::
+
+ nova/nova/virt/baremetal/*
+ nova/nova/virt/driver.py
+ nova/nova/tests/baremetal/*
+ nova/nova/tests/compute/test_compute.py
+ nova/nova/compute/manager.py
+ nova/nova/compute/resource_tracker.py
+ nova/nova/manager.py
+ nova/nova/scheduler/driver.py
+ nova/nova/scheduler/filter_scheduler.py
+ nova/nova/scheduler/host_manager.py
+ nova/nova/scheduler/baremetal_host_manager.py
+ nova/bin/bm_deploy_server
+ nova/bin/nova-bm-manage
+
+Additional setting for bare-metal provisioning [nova.conf]
+----------------------------------------------------------
+
+::
+
+ # baremetal database connection
+ baremetal_sql_connection = mysql://$ID:$Password@$IP/nova_bm
+
+ # baremetal compute driver
+ compute_driver = nova.virt.baremetal.driver.BareMetalDriver
+ baremetal_driver = {nova.virt.baremetal.tilera.TILERA | nova.virt.baremetal.pxe.PXE}
+ power_manager = {nova.virt.baremetal.tilera_pdu.Pdu | nova.virt.baremetal.ipmi.Ipmi}
+
+ # instance_type_extra_specs for this baremetal compute
+ instance_type_extra_specs = cpu_arch:{tilepro64 | x86_64 | arm}
+
+ # TFTP root
+ baremetal_tftp_root = /tftpboot
+
+ # baremetal scheduler host manager
+ scheduler_host_manager = nova.scheduler.baremetal_host_manager.BaremetalHostManager
+
+
+Non-PXE (Tilera) Bare-metal Provisioning
+----------------------------------------
+
+1. tilera-bm-instance-creation.rst
+
+2. tilera-bm-installation.rst
+
+PXE Bare-metal Provisioning
+---------------------------
+
+1. pxe-bm-instance-creation.rst
+
+2. pxe-bm-installation.rst
+
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
deleted file mode 100644
index aef648eb0..000000000
--- a/nova/virt/baremetal/dom.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import power_state
-from nova import exception
-from nova import flags
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova.virt.baremetal import nodes
-
-LOG = logging.getLogger(__name__)
-
-
-def read_domains(fname):
- try:
- f = open(fname, 'r')
- json = f.read()
- f.close()
- domains = jsonutils.loads(json)
- return domains
- except IOError:
- raise exception.NotFound()
-
-
-def write_domains(fname, domains):
- json = jsonutils.dumps(domains)
- f = open(fname, 'w')
- f.write(json)
- f.close()
-
-
-class BareMetalDom(object):
- """
- BareMetalDom class handles fake domain for bare metal back ends.
-
- This implements the singleton pattern.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalDom singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalDom, cls).__new__(cls)
- return cls._instance
-
- def __init__(self,
- fake_dom_file="/tftpboot/test_fake_dom_file"):
- """
- Only call __init__ the first time object is instantiated.
-
- Sets and Opens domain file: /tftpboot/test_fake_dom_file. Even though
- nova-compute service is rebooted, this file should retain the
- existing domains.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.fake_dom_file = fake_dom_file
- self.domains = []
- self.fake_dom_nums = 0
- self.baremetal_nodes = nodes.get_baremetal_nodes()
-
- self._read_domain_from_file()
-
- def _read_domain_from_file(self):
- """
- Reads the domains from a file.
- """
- try:
- self.domains = read_domains(self.fake_dom_file)
- except IOError:
- dom = []
- LOG.debug(_("No domains exist."))
- return
- msg = _("============= initial domains =========== : %s")
- LOG.debug(msg % (self.domains))
- for dom in self.domains[:]:
- if dom['status'] == power_state.BUILDING:
- LOG.debug(_("Building domain: to be removed"))
- self.destroy_domain(dom['name'])
- continue
- elif dom['status'] != power_state.RUNNING:
- LOG.debug(_("Not running domain: remove"))
- self.domains.remove(dom)
- continue
- res = self.baremetal_nodes.set_status(dom['node_id'],
- dom['status'])
- if res > 0:
- self.fake_dom_nums = self.fake_dom_nums + 1
- else:
- LOG.debug(_("domain running on an unknown node: discarded"))
- self.domains.remove(dom)
- continue
-
- LOG.debug(self.domains)
- self.store_domain()
-
- def reboot_domain(self, name):
- """
- Finds domain and deactivates (power down) bare-metal node.
-
- Activates the node again. In case of fail,
- destroys the domain from domains list.
- """
- fd = self.find_domain(name)
- if fd == []:
- msg = _("No such domain (%s)")
- raise exception.NotFound(msg % name)
- node_ip = self.baremetal_nodes.get_ip_by_id(fd['node_id'])
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
- except Exception:
- msg = _("Failed power down Bare-metal node %s")
- raise exception.NotFound(msg % fd['node_id'])
- self.change_domain_state(name, power_state.BUILDING)
- try:
- state = self.baremetal_nodes.activate_node(fd['node_id'],
- node_ip, name, fd['mac_address'], fd['ip_address'])
- self.change_domain_state(name, state)
- return state
- except Exception:
- LOG.debug(_("deactivate -> activate fails"))
- self.destroy_domain(name)
- raise
-
- def destroy_domain(self, name):
- """
- Removes domain from domains list and deactivates node.
- """
- fd = self.find_domain(name)
- if fd == []:
- LOG.debug(_("destroy_domain: no such domain"))
- msg = _("No such domain %s")
- raise exception.NotFound(msg % name)
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
-
- self.domains.remove(fd)
- msg = _("Domains: %s")
- LOG.debug(msg % (self.domains))
- self.store_domain()
- msg = _("After storing domains: %s")
- LOG.debug(msg % (self.domains))
- except Exception:
- LOG.debug(_("deactivation/removing domain failed"))
- raise
-
- def create_domain(self, xml_dict, bpath):
- """
- Adds a domain to domains list and activates an idle bare-metal node.
- """
- LOG.debug(_("===== Domain is being created ====="))
- fd = self.find_domain(xml_dict['name'])
- if fd != []:
- msg = _("Same domain name already exists")
- raise exception.NotFound(msg)
- LOG.debug(_("create_domain: before get_idle_node"))
-
- node_id = self.baremetal_nodes.get_idle_node()
- node_ip = self.baremetal_nodes.get_ip_by_id(node_id)
-
- new_dom = {'node_id': node_id,
- 'name': xml_dict['name'],
- 'memory_kb': xml_dict['memory_kb'],
- 'vcpus': xml_dict['vcpus'],
- 'mac_address': xml_dict['mac_address'],
- 'user_data': xml_dict['user_data'],
- 'ip_address': xml_dict['ip_address'],
- 'image_id': xml_dict['image_id'],
- 'kernel_id': xml_dict['kernel_id'],
- 'ramdisk_id': xml_dict['ramdisk_id'],
- 'status': power_state.BUILDING}
- self.domains.append(new_dom)
- msg = _("Created new domain: %s")
- LOG.debug(msg % (new_dom))
- self.change_domain_state(new_dom['name'], power_state.BUILDING)
-
- self.baremetal_nodes.set_image(bpath, node_id)
-
- state = power_state.NOSTATE
- try:
- state = self.baremetal_nodes.activate_node(node_id,
- node_ip, new_dom['name'], new_dom['mac_address'],
- new_dom['ip_address'], new_dom['user_data'])
- self.change_domain_state(new_dom['name'], state)
- except Exception:
- self.domains.remove(new_dom)
- self.baremetal_nodes.free_node(node_id)
- LOG.debug(_("Failed to boot Bare-metal node %s"), node_id)
- return state
-
- def change_domain_state(self, name, state):
- """
- Changes domain state by the given state and updates domain file.
- """
- l = self.find_domain(name)
- if l == []:
- msg = _("No such domain exists")
- raise exception.NotFound(msg)
- i = self.domains.index(l)
- self.domains[i]['status'] = state
- LOG.debug(_("change_domain_state: to new state %s"), str(state))
- self.store_domain()
-
- def store_domain(self):
- """
- Stores fake domains to the file.
- """
- msg = _("Stored fake domains to the file: %s")
- LOG.debug(msg % (self.domains))
- write_domains(self.fake_dom_file, self.domains)
-
- def find_domain(self, name):
- """
- Finds domain by the given name and returns the domain.
- """
- for item in self.domains:
- if item['name'] == name:
- return item
- LOG.debug(_("domain does not exist"))
- return []
-
- def list_domains(self):
- """
- Returns the instance name from domains list.
- """
- if self.domains == []:
- return []
- return [x['name'] for x in self.domains]
-
- def get_domain_info(self, instance_name):
- """
- Finds domain by the given instance_name and returns informaiton.
-
- For example, status, memory_kb, vcpus, etc.
- """
- domain = self.find_domain(instance_name)
- if domain != []:
- return [domain['status'], domain['memory_kb'],
- domain['memory_kb'],
- domain['vcpus'],
- 100]
- else:
- return [power_state.NOSTATE, '', '', '', '']
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index b0576aa38..9904fdcd4 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -1,6 +1,9 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
+# coding=utf-8
+#
+# Copyright (c) 2012 NTT DOCOMO, INC
+# Copyright (c) 2011 University of Southern California / ISI
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,731 +16,459 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-#
-"""
-A connection to a hypervisor through baremetal.
-
-**Related Flags**
-
-:baremetal_type: Baremetal domain type.
-:baremetal_uri: Override for the default baremetal URI (baremetal_type).
-:rescue_image_id: Rescue ami image (default: ami-rescue).
-:rescue_kernel_id: Rescue aki image (default: aki-rescue).
-:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
-:injected_network_template: Template file for injected network
-:allow_project_net_traffic: Whether to allow in project network traffic
"""
+A driver for Bare-metal platform.
+"""
-import hashlib
-import os
-import shutil
-
-from nova.compute import instance_types
from nova.compute import power_state
-from nova.compute import vm_states
-from nova import config
from nova import context as nova_context
from nova import exception
-from nova import flags
-from nova import notifications
from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import lockutils
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import nodes
-from nova.virt.disk import api as disk
+from nova import paths
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import db
from nova.virt import driver
-from nova.virt.libvirt import utils as libvirt_utils
-
-
-Template = None
-
-LOG = logging.getLogger(__name__)
-
-CONF = config.CONF
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_type',
- default='baremetal',
- help='baremetal domain type'),
+from nova.virt import firewall
+from nova.virt.libvirt import imagecache
+
+opts = [
+ cfg.BoolOpt('inject_password',
+ default=True,
+ help='Whether baremetal compute injects password or not'),
+ cfg.StrOpt('injected_network_template',
+ default=paths.basedir_def('nova/virt/'
+ 'baremetal/interfaces.template'),
+ help='Template file for injected network'),
+ cfg.StrOpt('vif_driver',
+ default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver',
+ help='Baremetal VIF driver.'),
+ cfg.StrOpt('volume_driver',
+ default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver',
+ help='Baremetal volume driver.'),
+ cfg.ListOpt('instance_type_extra_specs',
+ default=[],
+ help='a list of additional capabilities corresponding to '
+ 'instance_type_extra_specs for this compute '
+ 'host to advertise. Valid entries are name=value, pairs '
+ 'For example, "key1:val1, key2:val2"'),
+ cfg.StrOpt('driver',
+ default='nova.virt.baremetal.pxe.PXE',
+ help='Baremetal driver back-end (pxe or tilera)'),
+ cfg.StrOpt('power_manager',
+ default='nova.virt.baremetal.ipmi.IPMI',
+ help='Baremetal power management method'),
+ cfg.StrOpt('tftp_root',
+ default='/tftpboot',
+ help='Baremetal compute node\'s tftp root path'),
]
-CONF.register_opts(baremetal_opts)
-
-
-def _late_load_cheetah():
- global Template
- if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(),
- ['Template'], -1)
- Template = t.Template
-
-class BareMetalDriver(driver.ComputeDriver):
-
- def __init__(self, virtapi, read_only):
- _late_load_cheetah()
- # Note that baremetal doesn't have a read-only connection
- # mode, so the read_only parameter is ignored
- super(BareMetalDriver, self).__init__(virtapi)
- self.baremetal_nodes = nodes.get_baremetal_nodes()
- self._wrapped_conn = None
- self._host_state = None
-
- @property
- def HostState(self):
- if not self._host_state:
- self._host_state = HostState(self)
- return self._host_state
-
- def init_host(self, host):
- pass
-
- def _get_connection(self):
- self._wrapped_conn = dom.BareMetalDom()
- return self._wrapped_conn
- _conn = property(_get_connection)
-
- def get_pty_for_instance(self, instance_name):
- raise NotImplementedError()
+LOG = logging.getLogger(__name__)
- def list_instances(self):
- return self._conn.list_domains()
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
- def destroy(self, instance, network_info, block_device_info=None,
- cleanup=True):
- while True:
- try:
- self._conn.destroy_domain(instance['name'])
- break
- except Exception as ex:
- LOG.debug(_("Error encountered when destroying instance "
- "'%(name)s': %(ex)s") %
- {"name": instance["name"], "ex": ex},
- instance=instance)
- break
-
- if cleanup:
- self._cleanup(instance)
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+CONF.import_opt('host', 'nova.netconf')
- return True
+DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
+ firewall.__name__,
+ firewall.NoopFirewallDriver.__name__)
- def _cleanup(self, instance):
- target = os.path.join(CONF.instances_path, instance['name'])
- instance_name = instance['name']
- LOG.info(_('instance %(instance_name)s: deleting instance files'
- ' %(target)s') % locals(), instance=instance)
- if CONF.baremetal_type == 'lxc':
- disk.destroy_container(self.container)
- if os.path.exists(target):
- shutil.rmtree(target)
-
- @exception.wrap_exception
- def attach_volume(self, instance_name, device_path, mountpoint):
- raise exception.Invalid("attach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def detach_volume(self, instance_name, mountpoint):
- raise exception.Invalid("detach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def snapshot(self, instance, image_id):
- raise exception.Invalid("snapshot not supported for baremetal.")
-
- @exception.wrap_exception
- def reboot(self, instance):
- timer = utils.LoopingCall(f=None)
-
- def _wait_for_reboot():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_reboot failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_reboot
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def rescue(self, context, instance, network_info, rescue_password):
- """Loads a VM using rescue images.
-
- A rescue is normally performed when something goes wrong with the
- primary images and data needs to be corrected/recovered. Rescuing
- should not edit or over-ride the original image, only allow for
- data recovery.
- """
- self.destroy(instance, False)
+def _get_baremetal_nodes(context):
+ nodes = db.bm_node_get_all(context, service_host=CONF.host)
+ return nodes
- rescue_images = {'image_id': CONF.baremetal_rescue_image_id,
- 'kernel_id': CONF.baremetal_rescue_kernel_id,
- 'ramdisk_id': CONF.baremetal_rescue_ramdisk_id}
- self._create_image(instance, '.rescue', rescue_images,
- network_info=network_info)
- timer = utils.LoopingCall(f=None)
+def _get_baremetal_node_by_instance_uuid(instance_uuid):
+ ctx = nova_context.get_admin_context()
+ node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
+ if node['service_host'] != CONF.host:
+ LOG.error(_("Request for baremetal node %s "
+ "sent to wrong service host") % instance_uuid)
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ return node
- def _wait_for_rescue():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_rescue failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_rescue
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def unrescue(self, instance, network_info):
- """Reboot the VM which is being rescued back into primary images.
-
- Because reboot destroys and re-creates instances, unresue should
- simply call reboot.
- """
- self.reboot(instance)
+def _update_state(context, node, instance, state):
+ """Update the node state in baremetal DB
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None):
- LOG.debug(_("<============= spawn of baremetal =============>"))
-
- def basepath(fname='', suffix=''):
- return os.path.join(CONF.instances_path,
- instance['name'],
- fname + suffix)
- bpath = basepath(suffix='')
- timer = utils.LoopingCall(f=None)
-
- xml_dict = self.to_xml_dict(instance, network_info)
- self._create_image(context, instance, xml_dict,
- network_info=network_info,
- block_device_info=block_device_info)
- LOG.debug(_("instance %s: is building"), instance['name'],
- instance=instance)
- LOG.debug(xml_dict, instance=instance)
-
- def _wait_for_boot():
- try:
- LOG.debug(_("Key is injected but instance is not running yet"),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.BUILDING})
- notifications.send_update(context, old_ref, new_ref)
-
- state = self._conn.create_domain(xml_dict, bpath)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'],
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ACTIVE})
- notifications.send_update(context, old_ref, new_ref)
-
- LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
- instance=instance)
- LOG.debug(_("instance %s spawned successfully"),
- instance['name'], instance=instance)
- else:
- LOG.debug(_('instance %s:not booted'), instance['name'],
- instance=instance)
- except Exception:
- LOG.exception(_("Baremetal assignment is overcommitted."),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ERROR,
- 'power_state': power_state.FAILED})
- notifications.send_update(context, old_ref, new_ref)
-
- timer.stop()
- timer.f = _wait_for_boot
-
- return timer.start(interval=0.5).wait()
+ If instance is not supplied, reset the instance_uuid field for this node.
- def get_console_output(self, instance):
- console_log = os.path.join(CONF.instances_path, instance['name'],
- 'console.log')
+ """
+ values = {'task_state': state}
+ if not instance:
+ values['instance_uuid'] = None
+ db.bm_node_update(context, node['id'], values)
- libvirt_utils.chown(console_log, os.getuid())
- fd = self._conn.find_domain(instance['name'])
+def get_power_manager(**kwargs):
+ cls = importutils.import_class(CONF.baremetal.power_manager)
+ return cls(**kwargs)
- self.baremetal_nodes.get_console_output(console_log, fd['node_id'])
- fpath = console_log
+class BareMetalDriver(driver.ComputeDriver):
+ """BareMetal hypervisor driver."""
- return libvirt_utils.load_file(fpath)
+ capabilities = {
+ "has_imagecache": True,
+ }
- @exception.wrap_exception
- def get_ajax_console(self, instance):
- raise NotImplementedError()
+ def __init__(self, virtapi, read_only=False):
+ super(BareMetalDriver, self).__init__(virtapi)
- @exception.wrap_exception
- def get_vnc_console(self, instance):
- raise NotImplementedError()
+ self.driver = importutils.import_object(
+ CONF.baremetal.driver)
+ self.vif_driver = importutils.import_object(
+ CONF.baremetal.vif_driver)
+ self.firewall_driver = firewall.load_driver(
+ default=DEFAULT_FIREWALL_DRIVER)
+ self.volume_driver = importutils.import_object(
+ CONF.baremetal.volume_driver, virtapi)
+ self.image_cache_manager = imagecache.ImageCacheManager()
+
+ extra_specs = {}
+ extra_specs["baremetal_driver"] = CONF.baremetal.driver
+ for pair in CONF.baremetal.instance_type_extra_specs:
+ keyval = pair.split(':', 1)
+ keyval[0] = keyval[0].strip()
+ keyval[1] = keyval[1].strip()
+ extra_specs[keyval[0]] = keyval[1]
+ if not 'cpu_arch' in extra_specs:
+ LOG.warning(
+ _('cpu_arch is not found in instance_type_extra_specs'))
+ extra_specs['cpu_arch'] = ''
+ self.extra_specs = extra_specs
+
+ self.supported_instances = [
+ (extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
+ ]
+
+ @classmethod
+ def instance(cls):
+ if not hasattr(cls, '_instance'):
+ cls._instance = cls()
+ return cls._instance
- @staticmethod
- def _cache_image(fetch_func, target, fname, cow=False, *args, **kwargs):
- """Wrapper for a method that creates an image that caches the image.
+ def init_host(self, host):
+ return
- This wrapper will save the image into a common store and create a
- copy for use by the hypervisor.
+ def get_hypervisor_type(self):
+ return 'baremetal'
- The underlying method should specify a kwarg of target representing
- where the image will be saved.
+ def get_hypervisor_version(self):
+ # TODO(deva): define the version properly elsewhere
+ return 1
- fname is used as the filename of the base image. The filename needs
- to be unique to a given image.
+ def legacy_nwinfo(self):
+ return True
- If cow is True, it will make a CoW image instead of a copy.
- """
- if not os.path.exists(target):
- base_dir = os.path.join(CONF.instances_path, '_base')
- if not os.path.exists(base_dir):
- fileutils.ensure_tree(base_dir)
- base = os.path.join(base_dir, fname)
-
- @lockutils.synchronized(fname, 'nova-')
- def call_if_not_exists(base, fetch_func, *args, **kwargs):
- if not os.path.exists(base):
- fetch_func(target=base, *args, **kwargs)
-
- call_if_not_exists(base, fetch_func, *args, **kwargs)
-
- if cow:
- libvirt_utils.create_cow_image(base, target)
- else:
- libvirt_utils.copy_image(base, target)
-
- def _create_image(self, context, inst, xml, suffix='',
- disk_images=None, network_info=None,
- block_device_info=None):
- if not suffix:
- suffix = ''
-
- # syntactic nicety
- def basepath(fname='', suffix=suffix):
- return os.path.join(CONF.instances_path,
- inst['name'],
- fname + suffix)
-
- # ensure directories exist and are writable
- fileutils.ensure_tree(basepath(suffix=''))
- utils.execute('chmod', '0777', basepath(suffix=''))
-
- LOG.info(_('instance %s: Creating image'), inst['name'],
- instance=inst)
-
- if CONF.baremetal_type == 'lxc':
- container_dir = '%s/rootfs' % basepath(suffix='')
- fileutils.ensure_tree(container_dir)
-
- # NOTE(vish): No need add the suffix to console.log
- libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
-
- if not disk_images:
- disk_images = {'image_id': inst['image_ref'],
- 'kernel_id': inst['kernel_id'],
- 'ramdisk_id': inst['ramdisk_id']}
-
- if disk_images['kernel_id']:
- fname = disk_images['kernel_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('kernel'),
- fname=fname,
- cow=False,
- image_id=disk_images['kernel_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
- if disk_images['ramdisk_id']:
- fname = disk_images['ramdisk_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('ramdisk'),
- fname=fname,
- cow=False,
- image_id=disk_images['ramdisk_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
- size = inst['root_gb'] * 1024 * 1024 * 1024
-
- inst_type_id = inst['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
- if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
- size = None
- root_fname += "_sm"
- else:
- root_fname += "_%d" % inst['root_gb']
-
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('root'),
- fname=root_fname,
- cow=False, # CONF.use_cow_images,
- image_id=disk_images['image_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- # For now, we assume that if we're not using a kernel, we're using a
- # partitioned disk image where the target partition is the first
- # partition
- target_partition = None
- if not inst['kernel_id']:
- target_partition = "1"
-
- if CONF.baremetal_type == 'lxc':
- target_partition = None
-
- if inst['key_data']:
- key = str(inst['key_data'])
- else:
- key = None
- net = None
-
- nets = []
- ifc_template = open(CONF.injected_network_template).read()
- ifc_num = -1
- have_injected_networks = False
- admin_context = nova_context.get_admin_context()
- for (network_ref, mapping) in network_info:
- ifc_num += 1
-
- if not network_ref['injected']:
+ def list_instances(self):
+ l = []
+ ctx = nova_context.get_admin_context()
+ for node in _get_baremetal_nodes(ctx):
+ if not node['instance_uuid']:
+ # Not currently assigned to an instance.
continue
-
- have_injected_networks = True
- address = mapping['ips'][0]['ip']
- netmask = mapping['ips'][0]['netmask']
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- net_info = {'name': 'eth%d' % ifc_num,
- 'address': address,
- 'netmask': netmask,
- 'gateway': mapping['gateway'],
- 'broadcast': mapping['broadcast'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6}
- nets.append(net_info)
-
- if have_injected_networks:
- net = str(Template(ifc_template,
- searchList=[{'interfaces': nets,
- 'use_ipv6': CONF.use_ipv6}]))
-
- metadata = inst.get('metadata')
- if any((key, net, metadata)):
- inst_name = inst['name']
-
- injection_path = basepath('root')
- img_id = inst['image_ref']
-
- for injection in ('metadata', 'key', 'net'):
- if locals()[injection]:
- LOG.info(_('instance %(inst_name)s: injecting '
- '%(injection)s into image %(img_id)s'),
- locals(), instance=inst)
try:
- disk.inject_data(injection_path, key, net, metadata,
- partition=target_partition,
- use_cow=False) # CONF.use_cow_images
-
- except Exception as e:
- # This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
- ' data into image %(img_id)s (%(e)s)') % locals(),
- instance=inst)
-
- def _prepare_xml_info(self, instance, network_info, rescue,
- block_device_info=None):
- # block_device_mapping = driver.block_device_info_get_mapping(
- # block_device_info)
- _map = 0
- for (_, mapping) in network_info:
- _map += 1
-
- nics = []
- # FIXME(vish): stick this in db
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
-
- driver_type = 'raw'
-
- xml_info = {'type': CONF.baremetal_type,
- 'name': instance['name'],
- 'basepath': os.path.join(CONF.instances_path,
- instance['name']),
- 'memory_kb': inst_type['memory_mb'] * 1024,
- 'vcpus': inst_type['vcpus'],
- 'rescue': rescue,
- 'driver_type': driver_type,
- 'nics': nics,
- 'ip_address': mapping['ips'][0]['ip'],
- 'mac_address': mapping['mac'],
- 'user_data': instance['user_data'],
- 'image_id': instance['image_ref'],
- 'kernel_id': instance['kernel_id'],
- 'ramdisk_id': instance['ramdisk_id']}
-
- if not rescue:
- if instance['kernel_id']:
- xml_info['kernel'] = xml_info['basepath'] + "/kernel"
-
- if instance['ramdisk_id']:
- xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
-
- xml_info['disk'] = xml_info['basepath'] + "/disk"
- return xml_info
-
- def to_xml_dict(self, instance, rescue=False, network_info=None):
- LOG.debug(_('instance %s: starting toXML method'), instance['name'],
- instance=instance)
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
- LOG.debug(_('instance %s: finished toXML method'), instance['name'],
- instance=instance)
- return xml_info
-
- def get_info(self, instance):
- """Retrieve information from baremetal for a specific instance name.
+ inst = self.virtapi.instance_get_by_uuid(
+ ctx, node['instance_uuid'])
+ except exception.InstanceNotFound:
+ # Assigned to an instance that no longer exists.
+ LOG.warning(_("Node %(id)r assigned to instance %(uuid)r "
+ "which cannot be found."),
+ dict(id=node['id'], uuid=node['instance_uuid']))
+ continue
+ l.append(inst['name'])
+ return l
- If a baremetal error is encountered during lookup, we might raise a
- NotFound exception or Error exception depending on how severe the
- baremetal error is.
+ def _require_node(self, instance):
+ """Get a node_id out of a manager instance dict.
+ The compute manager is meant to know the node id, so a missing node is
+ a significant issue - it may mean we've been passed someone elses data.
"""
- _domain_info = self._conn.get_domain_info(instance['name'])
- state, max_mem, mem, num_cpu, cpu_time = _domain_info
- return {'state': state,
- 'max_mem': max_mem,
- 'mem': mem,
- 'num_cpu': num_cpu,
- 'cpu_time': cpu_time}
-
- def _create_new_domain(self, persistent=True, launch_flags=0):
- raise NotImplementedError()
+ node_id = instance.get('node')
+ if not node_id:
+ raise exception.NovaException(_(
+ "Baremetal node id not supplied to driver for %r")
+ % instance['uuid'])
+ return node_id
+
+ def macs_for_instance(self, instance):
+ context = nova_context.get_admin_context()
+ node_id = self._require_node(instance)
+ return set(iface['address'] for iface in
+ db.bm_interface_get_all_by_bm_node_id(context, node_id))
- def get_diagnostics(self, instance_name):
- # diagnostics are not supported for baremetal
- raise NotImplementedError()
-
- def get_disks(self, instance_name):
- raise NotImplementedError()
-
- def get_interfaces(self, instance_name):
- raise NotImplementedError()
-
- def get_vcpu_total(self):
- """Get vcpu number of physical computer.
-
- :returns: the number of cpu core.
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ node_id = self._require_node(instance)
- """
+ # NOTE(deva): this db method will raise an exception if the node is
+ # already in use. We call it here to ensure no one else
+ # allocates this node before we begin provisioning it.
+ node = db.bm_node_set_uuid_safe(context, node_id,
+ {'instance_uuid': instance['uuid'],
+ 'task_state': baremetal_states.BUILDING})
+ pm = get_power_manager(node=node, instance=instance)
- # On certain platforms, this will raise a NotImplementedError.
try:
- return self.baremetal_nodes.get_hw_info('vcpus')
- except NotImplementedError:
- LOG.warn(_("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "
- "This error can be safely ignored for now."))
- return False
-
- def get_memory_mb_total(self):
- """Get the total memory size(MB) of physical computer.
-
- :returns: the total amount of memory(MB).
-
- """
- return self.baremetal_nodes.get_hw_info('memory_mb')
-
- def get_local_gb_total(self):
- """Get the total hdd size(GB) of physical computer.
-
- :returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
- return self.baremetal_nodes.get_hw_info('local_gb')
+ self._plug_vifs(instance, network_info, context=context)
+
+ self.firewall_driver.setup_basic_filtering(
+ instance, network_info)
+ self.firewall_driver.prepare_instance_filter(
+ instance, network_info)
+ self.firewall_driver.apply_instance_filter(
+ instance, network_info)
+
+ block_device_mapping = driver.\
+ block_device_info_get_mapping(block_device_info)
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mountpoint = vol['mount_device']
+ self.attach_volume(
+ connection_info, instance['name'], mountpoint)
- def get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
+ try:
+ image_info = self.driver.cache_images(
+ context, node, instance,
+ admin_password=admin_password,
+ image_meta=image_meta,
+ injected_files=injected_files,
+ network_info=network_info,
+ )
+ try:
+ self.driver.activate_bootloader(context, node, instance)
+ except Exception, e:
+ self.driver.deactivate_bootloader(context, node, instance)
+ raise e
+ except Exception, e:
+ self.driver.destroy_images(context, node, instance)
+ raise e
+ except Exception, e:
+ # TODO(deva): do network and volume cleanup here
+ raise e
+ else:
+ # NOTE(deva): pm.activate_node should not raise exceptions.
+ # We check its success in "finally" block
+ pm.activate_node()
+ pm.start_console()
+ finally:
+ if pm.state != baremetal_states.ACTIVE:
+ pm.state = baremetal_states.ERROR
+ try:
+ _update_state(context, node, instance, pm.state)
+ except exception.DBError, e:
+ LOG.warning(_("Failed to update state record for "
+ "baremetal node %s") % instance['uuid'])
+
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ ctx = nova_context.get_admin_context()
+ pm = get_power_manager(node=node, instance=instance)
+ state = pm.reboot_node()
+ _update_state(ctx, node, instance, state)
+
+ def destroy(self, instance, network_info, block_device_info=None):
+ ctx = nova_context.get_admin_context()
- :returns: The total number of vcpu that currently used.
+ try:
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ except exception.InstanceNotFound:
+ # TODO(deva): refactor so that dangling files can be cleaned
+ # up even after a failed boot or delete
+ LOG.warning(_("Delete called on non-existing instance %s")
+ % instance['uuid'])
+ return
- """
- return len(self._conn.list_domains())
+ self.driver.deactivate_node(ctx, node, instance)
- def get_memory_mb_used(self):
- """Get the free memory size(MB) of physical computer.
+ pm = get_power_manager(node=node, instance=instance)
- :returns: the total usage of memory(MB).
+ pm.stop_console()
- """
- return self.baremetal_nodes.get_hw_info('memory_mb_used')
+ ## power off the node
+ state = pm.deactivate_node()
- def get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
+ ## cleanup volumes
+ # NOTE(vish): we disconnect from volumes regardless
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mountpoint = vol['mount_device']
+ self.detach_volume(connection_info, instance['name'], mountpoint)
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
+ self.driver.deactivate_bootloader(ctx, node, instance)
- """
- return self.baremetal_nodes.get_hw_info('local_gb_used')
-
- def get_hypervisor_type(self):
- """Get hypervisor type.
+ self.driver.destroy_images(ctx, node, instance)
- :returns: hypervisor type (ex. qemu)
+ # stop firewall
+ self.firewall_driver.unfilter_instance(instance,
+ network_info=network_info)
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_type')
+ self._unplug_vifs(instance, network_info)
- def get_hypervisor_version(self):
- """Get hypervisor version.
+ _update_state(ctx, node, None, state)
- :returns: hypervisor version (ex. 12003)
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ pm = get_power_manager(node=node, instance=instance)
+ pm.deactivate_node()
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_version')
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ pm = get_power_manager(node=node, instance=instance)
+ pm.activate_node()
- def get_cpu_info(self):
- """Get cpuinfo information.
+ def get_volume_connector(self, instance):
+ return self.volume_driver.get_volume_connector(instance)
- Obtains cpu feature from virConnect.getCapabilities,
- and returns as a json string.
+ def attach_volume(self, connection_info, instance, mountpoint):
+ return self.volume_driver.attach_volume(connection_info,
+ instance, mountpoint)
- :return: see above description
+ def detach_volume(self, connection_info, instance_name, mountpoint):
+ return self.volume_driver.detach_volume(connection_info,
+ instance_name, mountpoint)
- """
- return self.baremetal_nodes.get_hw_info('cpu_info')
-
- def block_stats(self, instance_name, disk):
- raise NotImplementedError()
-
- def interface_stats(self, instance_name, interface):
- raise NotImplementedError()
-
- def get_console_pool_info(self, console_type):
- #TODO(mdragon): console proxy should be implemented for baremetal,
- # in case someone wants to use it.
- # For now return fake data.
- return {'address': '127.0.0.1',
- 'username': 'fakeuser',
- 'password': 'fakepassword'}
+ def get_info(self, instance):
+ # NOTE(deva): compute/manager.py expects to get NotFound exception
+ # so we convert from InstanceNotFound
+ inst_uuid = instance.get('uuid')
+ node = _get_baremetal_node_by_instance_uuid(inst_uuid)
+ pm = get_power_manager(node=node, instance=instance)
+ ps = power_state.SHUTDOWN
+ if pm.is_power_on():
+ ps = power_state.RUNNING
+ return {'state': ps,
+ 'max_mem': node['memory_mb'],
+ 'mem': node['memory_mb'],
+ 'num_cpu': node['cpus'],
+ 'cpu_time': 0}
def refresh_security_group_rules(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
+ return True
def refresh_security_group_members(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
-
- def refresh_instance_security_rules(self, instance):
- # Bare metal doesn't currently support security groups
- pass
-
- def get_available_resource(self):
- """Updates compute manager resource info on ComputeNode table.
-
- This method is called when nova-coompute launches, and
- whenever admin executes "nova-manage service update_resource".
- """
+ self.firewall_driver.refresh_security_group_members(security_group_id)
+ return True
- # Updating host information
- dic = {'vcpus': self.get_vcpu_total(),
- 'memory_mb': self.get_memory_mb_total(),
- 'local_gb': self.get_local_gb_total(),
- 'vcpus_used': self.get_vcpu_used(),
- 'memory_mb_used': self.get_memory_mb_used(),
- 'local_gb_used': self.get_local_gb_used(),
+ def refresh_provider_fw_rules(self):
+ self.firewall_driver.refresh_provider_fw_rules()
+
+ def _node_resource(self, node):
+ vcpus_used = 0
+ memory_mb_used = 0
+ local_gb_used = 0
+
+ vcpus = node['cpus']
+ memory_mb = node['memory_mb']
+ local_gb = node['local_gb']
+ if node['registration_status'] != 'done' or node['instance_uuid']:
+ vcpus_used = node['cpus']
+ memory_mb_used = node['memory_mb']
+ local_gb_used = node['local_gb']
+
+ dic = {'vcpus': vcpus,
+ 'memory_mb': memory_mb,
+ 'local_gb': local_gb,
+ 'vcpus_used': vcpus_used,
+ 'memory_mb_used': memory_mb_used,
+ 'local_gb_used': local_gb_used,
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
- 'hypervisor_hostname': CONF.host,
- 'cpu_info': self.get_cpu_info(),
- 'cpu_arch': CONF.cpu_arch}
+ 'hypervisor_hostname': str(node['id']),
+ 'cpu_info': 'baremetal cpu',
+ }
+ return dic
- LOG.info(_('#### RLK: cpu_arch = %s ') % CONF.cpu_arch)
+ def refresh_instance_security_rules(self, instance):
+ self.firewall_driver.refresh_instance_security_rules(instance)
+
+ def get_available_resource(self, nodename):
+ context = nova_context.get_admin_context()
+ node = db.bm_node_get(context, nodename)
+ dic = self._node_resource(node)
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
- raise NotImplementedError()
-
- def live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
- raise NotImplementedError()
+ self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
+ self.firewall_driver.prepare_instance_filter(instance_ref,
+ network_info)
- def unfilter_instance(self, instance_ref):
- """See comments of same method in firewall_driver."""
- pass
+ def unfilter_instance(self, instance_ref, network_info):
+ self.firewall_driver.unfilter_instance(instance_ref,
+ network_info=network_info)
def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first."""
- LOG.debug(_("Updating!"))
- return self.HostState.get_host_stats(refresh=refresh)
-
-
-class HostState(object):
- """Manages information about the XenServer host this compute
- node is running on.
- """
+ caps = []
+ context = nova_context.get_admin_context()
+ nodes = db.bm_node_get_all(context,
+ service_host=CONF.host)
+ for node in nodes:
+ res = self._node_resource(node)
+ nodename = str(node['id'])
+ data = {}
+ data['vcpus'] = res['vcpus']
+ data['vcpus_used'] = res['vcpus_used']
+ data['cpu_info'] = res['cpu_info']
+ data['disk_total'] = res['local_gb']
+ data['disk_used'] = res['local_gb_used']
+ data['disk_available'] = res['local_gb'] - res['local_gb_used']
+ data['host_memory_total'] = res['memory_mb']
+ data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used']
+ data['hypervisor_type'] = res['hypervisor_type']
+ data['hypervisor_version'] = res['hypervisor_version']
+ data['hypervisor_hostname'] = nodename
+ data['supported_instances'] = self.supported_instances
+ data.update(self.extra_specs)
+ data['host'] = CONF.host
+ data['node'] = nodename
+ # TODO(NTTdocomo): put node's extra specs here
+ caps.append(data)
+ return caps
+
+ def plug_vifs(self, instance, network_info):
+ """Plugin VIFs into networks."""
+ self._plug_vifs(instance, network_info)
+
+ def _plug_vifs(self, instance, network_info, context=None):
+ if not context:
+ context = nova_context.get_admin_context()
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ if node:
+ pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
+ for pif in pifs:
+ if pif['vif_uuid']:
+ db.bm_interface_set_vif_uuid(context, pif['id'], None)
+ for (network, mapping) in network_info:
+ self.vif_driver.plug(instance, (network, mapping))
+
+ def _unplug_vifs(self, instance, network_info):
+ for (network, mapping) in network_info:
+ self.vif_driver.unplug(instance, (network, mapping))
+
+ def manage_image_cache(self, context, all_instances):
+ """Manage the local cache of images."""
+ self.image_cache_manager.verify_base_images(context, all_instances)
- def __init__(self, connection):
- super(HostState, self).__init__()
- self.connection = connection
- self._stats = {}
- self.update_status()
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first.
- """
- if refresh:
- self.update_status()
- return self._stats
+ def get_console_output(self, instance):
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ return self.driver.get_console_output(node, instance)
- def update_status(self):
- """
- We can get host status information.
- """
- LOG.debug(_("Updating host stats"))
- data = {}
- data["vcpus"] = self.connection.get_vcpu_total()
- data["vcpus_used"] = self.connection.get_vcpu_used()
- data["cpu_info"] = self.connection.get_cpu_info()
- data["cpu_arch"] = CONF.cpu_arch
- data["disk_total"] = self.connection.get_local_gb_total()
- data["disk_used"] = self.connection.get_local_gb_used()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
- data["host_memory_total"] = self.connection.get_memory_mb_total()
- data["host_memory_free"] = (data["host_memory_total"] -
- self.connection.get_memory_mb_used())
- data["hypervisor_type"] = self.connection.get_hypervisor_type()
- data["hypervisor_version"] = self.connection.get_hypervisor_version()
- data["hypervisor_hostname"] = CONF.host
- self._stats = data
+ def get_available_nodes(self):
+ context = nova_context.get_admin_context()
+ return [str(n['id']) for n in _get_baremetal_nodes(context)]
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
index 635089366..7a400af6f 100644
--- a/nova/virt/baremetal/fake.py
+++ b/nova/virt/baremetal/fake.py
@@ -1,6 +1,8 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2011 University of Southern California
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 University of Southern California / ISI
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,145 +15,70 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-#
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
+from nova.virt.baremetal import base
+from nova.virt.firewall import NoopFirewallDriver
-class BareMetalNodes(object):
- """
- This manages node information and implements singleton.
- BareMetalNodes class handles machine architectures of interest to
- technical computing users have either poor or non-existent support
- for virtualization.
- """
+class FakeDriver(base.NodeDriver):
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- return "fake"
+ def cache_images(self, context, node, instance, **kwargs):
+ pass
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
+ def destroy_images(self, context, node, instance):
+ pass
- Returns 1 if the node is in the nodes list.
- """
- return True
+ def activate_bootloader(self, context, node, instance):
+ pass
- def get_status(self):
- """
- Gets status of the given node.
- """
+ def deactivate_bootloader(self, context, node, instance):
pass
- def get_idle_node(self):
- """
- Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
- """
- return False
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- return "127.0.0.1"
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- return False
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
+ def activate_node(self, context, node, instance):
+ """For operations after power on."""
pass
- def deactivate_node(self, node_id):
- """
- Deactivates the given node by turnning it off.
- """
+ def deactivate_node(self, context, node, instance):
+ """For operations before power off."""
pass
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
+ def get_console_output(self, node, instance):
+ return 'fake\nconsole\noutput for instance %s' % instance['id']
- User can access the bare-metal node using ssh.
- """
- pass
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
- """
- pass
+class FakePowerManager(base.PowerManager):
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- pass
+ def __init__(self, **kwargs):
+ super(FakePowerManager, self).__init__(**kwargs)
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
- pass
+class FakeFirewallDriver(NoopFirewallDriver):
- def sleep_mgr(self, time):
- """
- Sleeps until the node is activated.
- """
- pass
+ def __init__(self):
+ super(FakeFirewallDriver, self).__init__()
- def ssh_set(self, node_ip):
- """
- Sets and Runs sshd in the node.
- """
- pass
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address):
- """
- Activates the given node using ID, IP, and MAC address.
- """
+class FakeVifDriver(object):
+
+ def __init__(self):
+ super(FakeVifDriver, self).__init__()
+
+ def plug(self, instance, vif):
pass
- def get_console_output(self, console_log):
- """
- Gets console output of the given node.
- """
+ def unplug(self, instance, vif):
pass
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
- Noting to do for tilera nodes: actual image is used.
- """
- pass
+class FakeVolumeDriver(object):
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
+ def __init__(self, virtapi):
+ super(FakeVolumeDriver, self).__init__()
+ self.virtapi = virtapi
+ self._initiator = "fake_initiator"
+
+ def attach_volume(self, connection_info, instance_name, mountpoint):
+ pass
- This should be done after ssh key is injected.
- """
+ def detach_volume(self, connection_info, instance_name, mountpoint):
pass
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
new file mode 100644
index 000000000..97c158727
--- /dev/null
+++ b/nova/virt/baremetal/ipmi.py
@@ -0,0 +1,257 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Baremetal IPMI power manager.
+"""
+
+import os
+import stat
+import tempfile
+
+from nova.exception import InvalidParameterValue
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import paths
+from nova import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import base
+from nova.virt.baremetal import utils as bm_utils
+
+opts = [
+ cfg.StrOpt('terminal',
+ default='shellinaboxd',
+ help='path to baremetal terminal program'),
+ cfg.StrOpt('terminal_cert_dir',
+ default=None,
+ help='path to baremetal terminal SSL cert(PEM)'),
+ cfg.StrOpt('terminal_pid_dir',
+ default=paths.state_path_def('baremetal/console'),
+ help='path to directory stores pidfiles of baremetal_terminal'),
+ cfg.IntOpt('ipmi_power_retry',
+ default=5,
+ help='maximal number of retries for IPMI operations'),
+ ]
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+
+LOG = logging.getLogger(__name__)
+
+
+def _make_password_file(password):
+ fd, path = tempfile.mkstemp()
+ os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
+ with os.fdopen(fd, "w") as f:
+ f.write(password)
+ return path
+
+
+def _get_console_pid_path(node_id):
+ name = "%s.pid" % node_id
+ path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
+ return path
+
+
+def _get_console_pid(node_id):
+ pid_path = _get_console_pid_path(node_id)
+ if os.path.exists(pid_path):
+ with open(pid_path, 'r') as f:
+ pid_str = f.read()
+ try:
+ return int(pid_str)
+ except ValueError:
+ LOG.warn(_("pid file %s does not contain any pid"), pid_path)
+ return None
+
+
+class IPMI(base.PowerManager):
+ """IPMI Power Driver for Baremetal Nova Compute
+
+ This PowerManager class provides mechanism for controlling the power state
+ of physical hardware via IPMI calls. It also provides serial console access
+ where available.
+
+ """
+
+ def __init__(self, node, **kwargs):
+ self.state = None
+ self.retries = None
+ self.node_id = node['id']
+ self.address = node['pm_address']
+ self.user = node['pm_user']
+ self.password = node['pm_password']
+ self.port = node['terminal_port']
+
+ if self.node_id == None:
+ raise InvalidParameterValue(_("Node id not supplied to IPMI"))
+ if self.address == None:
+ raise InvalidParameterValue(_("Address not supplied to IPMI"))
+ if self.user == None:
+ raise InvalidParameterValue(_("User not supplied to IPMI"))
+ if self.password == None:
+ raise InvalidParameterValue(_("Password not supplied to IPMI"))
+
+ def _exec_ipmitool(self, command):
+ args = ['ipmitool',
+ '-I',
+ 'lanplus',
+ '-H',
+ self.address,
+ '-U',
+ self.user,
+ '-f']
+ pwfile = _make_password_file(self.password)
+ try:
+ args.append(pwfile)
+ args.extend(command.split(" "))
+ out, err = utils.execute(*args, attempts=3)
+ LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)%s'"),
+ locals())
+ return out, err
+ finally:
+ bm_utils.unlink_without_raise(pwfile)
+
+ def _is_power(self, state):
+ out_err = self._exec_ipmitool("power status")
+ return out_err[0] == ("Chassis Power is %s\n" % state)
+
+ def _power_on(self):
+ """Turn the power to this node ON."""
+
+ def _wait_for_power_on():
+ """Called at an interval until the node's power is on."""
+
+ if self._is_power("on"):
+ self.state = baremetal_states.ACTIVE
+ raise utils.LoopingCallDone()
+ if self.retries > CONF.baremetal.ipmi_power_retry:
+ self.state = baremetal_states.ERROR
+ raise utils.LoopingCallDone()
+ try:
+ self.retries += 1
+ self._exec_ipmitool("power on")
+ except Exception:
+ LOG.exception(_("IPMI power on failed"))
+
+ self.retries = 0
+ timer = utils.FixedIntervalLoopingCall(_wait_for_power_on)
+ timer.start(interval=0.5).wait()
+
+ def _power_off(self):
+ """Turn the power to this node OFF."""
+
+ def _wait_for_power_off():
+ """Called at an interval until the node's power is off."""
+
+ if self._is_power("off"):
+ self.state = baremetal_states.DELETED
+ raise utils.LoopingCallDone()
+ if self.retries > CONF.baremetal.ipmi_power_retry:
+ self.state = baremetal_states.ERROR
+ raise utils.LoopingCallDone()
+ try:
+ self.retries += 1
+ self._exec_ipmitool("power off")
+ except Exception:
+ LOG.exception(_("IPMI power off failed"))
+
+ self.retries = 0
+ timer = utils.FixedIntervalLoopingCall(_wait_for_power_off)
+ timer.start(interval=0.5).wait()
+
+ def _set_pxe_for_next_boot(self):
+ try:
+ self._exec_ipmitool("chassis bootdev pxe")
+ except Exception:
+ LOG.exception(_("IPMI set next bootdev failed"))
+
+ def activate_node(self):
+ """Turns the power to node ON."""
+ if self._is_power("on") and self.state == baremetal_states.ACTIVE:
+ LOG.warning(_("Activate node called, but node %s "
+ "is already active") % self.address)
+ self._set_pxe_for_next_boot()
+ self._power_on()
+ return self.state
+
+ def reboot_node(self):
+ """Cycles the power to a node."""
+ self._power_off()
+ self._set_pxe_for_next_boot()
+ self._power_on()
+ return self.state
+
+ def deactivate_node(self):
+ """Turns the power to node OFF, regardless of current state."""
+ self._power_off()
+ return self.state
+
+ def is_power_on(self):
+ return self._is_power("on")
+
+ def start_console(self):
+ if not self.port:
+ return
+ args = []
+ args.append(CONF.baremetal.terminal)
+ if CONF.baremetal.terminal_cert_dir:
+ args.append("-c")
+ args.append(CONF.baremetal.terminal_cert_dir)
+ else:
+ args.append("-t")
+ args.append("-p")
+ args.append(str(self.port))
+ args.append("--background=%s" % _get_console_pid_path(self.node_id))
+ args.append("-s")
+
+ try:
+ pwfile = _make_password_file(self.password)
+ ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
+ " -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
+ % {'uid': os.getuid(),
+ 'gid': os.getgid(),
+ 'address': self.address,
+ 'user': self.user,
+ 'pwfile': pwfile,
+ }
+
+ args.append(ipmi_args)
+ # Run shellinaboxd without pipes. Otherwise utils.execute() waits
+ # infinitely since shellinaboxd does not close passed fds.
+ x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
+ x.append('</dev/null')
+ x.append('>/dev/null')
+ x.append('2>&1')
+ utils.execute(' '.join(x), shell=True)
+ finally:
+ bm_utils.unlink_without_raise(pwfile)
+
+ def stop_console(self):
+ console_pid = _get_console_pid(self.node_id)
+ if console_pid:
+ # Allow exitcode 99 (RC_UNAUTHORIZED)
+ utils.execute('kill', '-TERM', str(console_pid),
+ run_as_root=True,
+ check_exit_code=[0, 99])
+ bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
diff --git a/nova/virt/baremetal/net-dhcp.ubuntu.template b/nova/virt/baremetal/net-dhcp.ubuntu.template
new file mode 100644
index 000000000..e8824a88d
--- /dev/null
+++ b/nova/virt/baremetal/net-dhcp.ubuntu.template
@@ -0,0 +1,21 @@
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+#for $ifc in $interfaces
+auto ${ifc.name}
+iface ${ifc.name} inet dhcp
+#if $ifc.hwaddress
+ hwaddress ether ${ifc.hwaddress}
+#end if
+
+#if $use_ipv6
+iface ${ifc.name} inet6 dhcp
+#end if
+
+#end for
diff --git a/nova/virt/baremetal/net-static.ubuntu.template b/nova/virt/baremetal/net-static.ubuntu.template
new file mode 100644
index 000000000..f14f0ce8c
--- /dev/null
+++ b/nova/virt/baremetal/net-static.ubuntu.template
@@ -0,0 +1,30 @@
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+#for $ifc in $interfaces
+auto ${ifc.name}
+iface ${ifc.name} inet static
+ address ${ifc.address}
+ netmask ${ifc.netmask}
+ gateway ${ifc.gateway}
+#if $ifc.dns
+ dns-nameservers ${ifc.dns}
+#end if
+#if $ifc.hwaddress
+ hwaddress ether ${ifc.hwaddress}
+#end if
+
+#if $use_ipv6
+iface ${ifc.name} inet6 static
+ address ${ifc.address_v6}
+ netmask ${ifc.netmask_v6}
+ gateway ${ifc.gateway_v6}
+#end if
+
+#end for
diff --git a/nova/virt/baremetal/nodes.py b/nova/virt/baremetal/nodes.py
deleted file mode 100644
index d4bd90100..000000000
--- a/nova/virt/baremetal/nodes.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.virt.baremetal import fake
-from nova.virt.baremetal import tilera
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_driver',
- default='tilera',
- help='Bare-metal driver runs on')
- ]
-
-CONF = config.CONF
-CONF.register_opts(baremetal_opts)
-
-
-def get_baremetal_nodes():
- d = CONF.baremetal_driver
- if d == 'tilera':
- return tilera.get_baremetal_nodes()
- elif d == 'fake':
- return fake.get_baremetal_nodes()
- else:
- raise exception.NovaException(_("Unknown baremetal driver %(d)s"))
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
new file mode 100644
index 000000000..b94ac9032
--- /dev/null
+++ b/nova/virt/baremetal/pxe.py
@@ -0,0 +1,449 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for PXE bare-metal nodes.
+"""
+
+import os
+import shutil
+
+from nova.compute import instance_types
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import fileutils
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import base
+from nova.virt.baremetal import db
+from nova.virt.baremetal import utils as bm_utils
+from nova.virt.disk import api as disk
+
+
+pxe_opts = [
+ cfg.StrOpt('deploy_kernel',
+ help='Default kernel image ID used in deployment phase'),
+ cfg.StrOpt('deploy_ramdisk',
+ help='Default ramdisk image ID used in deployment phase'),
+ cfg.StrOpt('net_config_template',
+ default='$pybasedir/nova/virt/baremetal/'
+ 'net-dhcp.ubuntu.template',
+ help='Template file for injected network config'),
+ cfg.StrOpt('pxe_append_params',
+ help='additional append parameters for baremetal PXE boot'),
+ cfg.StrOpt('pxe_config_template',
+ default='$pybasedir/nova/virt/baremetal/pxe_config.template',
+ help='Template file for PXE configuration'),
+ ]
+
+LOG = logging.getLogger(__name__)
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(pxe_opts, baremetal_group)
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+CHEETAH = None
+
+
+def _get_cheetah():
+ global CHEETAH
+ if CHEETAH is None:
+ from Cheetah.Template import Template as CHEETAH
+ return CHEETAH
+
+
+def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
+ deployment_aki_path, deployment_ari_path,
+ aki_path, ari_path):
+ """Build the PXE config file for a node
+
+ This method builds the PXE boot configuration file for a node,
+ given all the required parameters.
+
+ The resulting file has both a "deploy" and "boot" label, which correspond
+ to the two phases of booting. This may be extended later.
+
+ """
+ LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
+ pxe_options = {
+ 'deployment_id': deployment_id,
+ 'deployment_key': deployment_key,
+ 'deployment_iscsi_iqn': deployment_iscsi_iqn,
+ 'deployment_aki_path': deployment_aki_path,
+ 'deployment_ari_path': deployment_ari_path,
+ 'aki_path': aki_path,
+ 'ari_path': ari_path,
+ 'pxe_append_params': CONF.baremetal.pxe_append_params,
+ }
+ cheetah = _get_cheetah()
+ pxe_config = str(cheetah(
+ open(CONF.baremetal.pxe_config_template).read(),
+ searchList=[{'pxe_options': pxe_options,
+ 'ROOT': '${ROOT}',
+ }]))
+ return pxe_config
+
+
+def build_network_config(network_info):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption about ordering
+ try:
+ assert isinstance(network_info, list)
+ except AssertionError:
+ network_info = [network_info]
+ interfaces = []
+ for id, (network, mapping) in enumerate(network_info):
+ address_v6 = None
+ gateway_v6 = None
+ netmask_v6 = None
+ if CONF.use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
+ gateway_v6 = mapping['gateway_v6']
+ interface = {
+ 'name': 'eth%d' % id,
+ 'hwaddress': mapping['mac'],
+ 'address': mapping['ips'][0]['ip'],
+ 'gateway': mapping['gateway'],
+ 'netmask': mapping['ips'][0]['netmask'],
+ 'dns': ' '.join(mapping['dns']),
+ 'address_v6': address_v6,
+ 'gateway_v6': gateway_v6,
+ 'netmask_v6': netmask_v6,
+ }
+ interfaces.append(interface)
+
+ cheetah = _get_cheetah()
+ network_config = str(cheetah(
+ open(CONF.baremetal.net_config_template).read(),
+ searchList=[
+ {'interfaces': interfaces,
+ 'use_ipv6': CONF.use_ipv6,
+ }
+ ]))
+ return network_config
+
+
+def get_deploy_aki_id(instance):
+ return instance.get('extra_specs', {}).\
+ get('deploy_kernel_id', CONF.baremetal.deploy_kernel)
+
+
+def get_deploy_ari_id(instance):
+ return instance.get('extra_specs', {}).\
+ get('deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
+
+
+def get_image_dir_path(instance):
+ """Generate the dir for an instance's disk."""
+ return os.path.join(CONF.instances_path, instance['name'])
+
+
+def get_image_file_path(instance):
+ """Generate the full path for an instance's disk."""
+ return os.path.join(CONF.instances_path, instance['name'], 'disk')
+
+
+def get_pxe_config_file_path(instance):
+ """Generate the path for an instance's PXE config file."""
+ return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
+
+
+def get_partition_sizes(instance):
+ type_id = instance['instance_type_id']
+ root_mb = instance['root_gb'] * 1024
+
+ # NOTE(deva): is there a way to get swap_mb directly from instance?
+ swap_mb = instance_types.get_instance_type(type_id)['swap']
+
+ # NOTE(deva): For simpler code paths on the deployment side,
+ # we always create a swap partition. If the flavor
+ # does not specify any swap, we default to 1MB
+ if swap_mb < 1:
+ swap_mb = 1
+
+ return (root_mb, swap_mb)
+
+
+def get_pxe_mac_path(mac):
+ """Convert a MAC address into a PXE config file name."""
+ return os.path.join(
+ CONF.baremetal.tftp_root,
+ 'pxelinux.cfg',
+ "01-" + mac.replace(":", "-").lower()
+ )
+
+
+def get_tftp_image_info(instance):
+ """Generate the paths for tftp files for this instance
+
+ Raises NovaException if
+ - instance does not contain kernel_id or ramdisk_id
+ - deploy_kernel_id or deploy_ramdisk_id can not be read from
+ instance['extra_specs'] and defaults are not set
+
+ """
+ image_info = {
+ 'kernel': [None, None],
+ 'ramdisk': [None, None],
+ 'deploy_kernel': [None, None],
+ 'deploy_ramdisk': [None, None],
+ }
+ try:
+ image_info['kernel'][0] = str(instance['kernel_id'])
+ image_info['ramdisk'][0] = str(instance['ramdisk_id'])
+ image_info['deploy_kernel'][0] = get_deploy_aki_id(instance)
+ image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance)
+ except KeyError as e:
+ pass
+
+ missing_labels = []
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ if uuid is None:
+ missing_labels.append(label)
+ else:
+ image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
+ instance['uuid'], label)
+ if missing_labels:
+ raise exception.NovaException(_(
+ "Can not activate PXE bootloader. The following boot parameters "
+ "were not passed to baremetal driver: %s") % missing_labels)
+ return image_info
+
+
+class PXE(base.NodeDriver):
+ """PXE bare metal driver."""
+
+ def __init__(self):
+ super(PXE, self).__init__()
+
+ def _collect_mac_addresses(self, context, node):
+ macs = []
+ macs.append(db.bm_node_get(context, node['id'])['prov_mac_address'])
+ for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
+ if nic['address']:
+ macs.append(nic['address'])
+ macs.sort()
+ return macs
+
+ def _generate_udev_rules(self, context, node):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption of ordering
+ macs = self._collect_mac_addresses(context, node)
+ rules = ''
+ for (i, mac) in enumerate(macs):
+ rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \
+ 'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \
+ 'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \
+ % {'mac': mac.lower(),
+ 'name': 'eth%d' % i,
+ }
+ return rules
+
+ def _cache_tftp_images(self, context, instance, image_info):
+ """Fetch the necessary kernels and ramdisks for the instance."""
+ fileutils.ensure_tree(
+ os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+
+ LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
+ instance['name'])
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ bm_utils.cache_image(
+ context=context,
+ target=path,
+ image_id=uuid,
+ user_id=instance['user_id'],
+ project_id=instance['project_id'],
+ )
+
+ def _cache_image(self, context, instance, image_meta):
+ """Fetch the instance's image from Glance
+
+ This method pulls the relevant AMI and associated kernel and ramdisk,
+ and the deploy kernel and ramdisk from Glance, and writes them
+ to the appropriate places on local disk.
+
+ Both sets of kernel and ramdisk are needed for PXE booting, so these
+ are stored under CONF.baremetal.tftp_root.
+
+ At present, the AMI is cached and certain files are injected.
+ Debian/ubuntu-specific assumptions are made regarding the injected
+ files. In a future revision, this functionality will be replaced by a
+ more scalable and os-agnostic approach: the deployment ramdisk will
+ fetch from Glance directly, and write its own last-mile configuration.
+
+ """
+ fileutils.ensure_tree(get_image_dir_path(instance))
+ image_path = get_image_file_path(instance)
+
+ LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
+ {'ami': image_meta['id'], 'name': instance['name']})
+ bm_utils.cache_image(context=context,
+ target=image_path,
+ image_id=image_meta['id'],
+ user_id=instance['user_id'],
+ project_id=instance['project_id']
+ )
+
+ return [image_meta['id'], image_path]
+
+ def _inject_into_image(self, context, node, instance, network_info,
+ injected_files=None, admin_password=None):
+ """Inject last-mile configuration into the instance's image
+
+ Much of this method is a hack around DHCP and cloud-init
+ not working together with baremetal provisioning yet.
+
+ """
+ # NOTE(deva): We assume that if we're not using a kernel,
+ # then the target partition is the first partition
+ partition = None
+ if not instance['kernel_id']:
+ partition = "1"
+
+ ssh_key = None
+ if 'key_data' in instance and instance['key_data']:
+ ssh_key = str(instance['key_data'])
+
+ if injected_files is None:
+ injected_files = []
+
+ net_config = build_network_config(network_info)
+ udev_rules = self._generate_udev_rules(context, node)
+ injected_files.append(
+ ('/etc/udev/rules.d/70-persistent-net.rules', udev_rules))
+
+ if instance['hostname']:
+ injected_files.append(('/etc/hostname', instance['hostname']))
+
+ LOG.debug(_("Injecting files into image for instance %(name)s") %
+ {'name': instance['name']})
+
+ bm_utils.inject_into_image(
+ image=get_image_file_path(instance),
+ key=ssh_key,
+ net=net_config,
+ metadata=instance['metadata'],
+ admin_password=admin_password,
+ files=injected_files,
+ partition=partition,
+ )
+
+ def cache_images(self, context, node, instance,
+ admin_password, image_meta, injected_files, network_info):
+ """Prepare all the images for this instance."""
+ tftp_image_info = get_tftp_image_info(instance)
+ self._cache_tftp_images(context, instance, tftp_image_info)
+
+ self._cache_image(context, instance, image_meta)
+ self._inject_into_image(context, node, instance, network_info,
+ injected_files, admin_password)
+
+ def destroy_images(self, context, node, instance):
+ """Delete instance's image file."""
+ bm_utils.unlink_without_raise(get_image_file_path(instance))
+ bm_utils.unlink_without_raise(get_image_dir_path(instance))
+
+ def activate_bootloader(self, context, node, instance):
+ """Configure PXE boot loader for an instance
+
+ Kernel and ramdisk images are downloaded by _cache_tftp_images,
+ and stored in /tftpboot/{uuid}/
+
+ This method writes the instances config file, and then creates
+ symlinks for each MAC address in the instance.
+
+ By default, the complete layout looks like this:
+
+ /tftpboot/
+ ./{uuid}/
+ kernel
+ ramdisk
+ deploy_kernel
+ deploy_ramdisk
+ config
+ ./pxelinux.cfg/
+ {mac} -> ../{uuid}/config
+
+ """
+ image_info = get_tftp_image_info(instance)
+ (root_mb, swap_mb) = get_partition_sizes(instance)
+ pxe_config_file_path = get_pxe_config_file_path(instance)
+ image_file_path = get_image_file_path(instance)
+
+ deployment_key = bm_utils.random_alnum(32)
+ deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
+ deployment_id = db.bm_deployment_create(
+ context,
+ deployment_key,
+ image_file_path,
+ pxe_config_file_path,
+ root_mb,
+ swap_mb
+ )
+ pxe_config = build_pxe_config(
+ deployment_id,
+ deployment_key,
+ deployment_iscsi_iqn,
+ image_info['deploy_kernel'][1],
+ image_info['deploy_ramdisk'][1],
+ image_info['kernel'][1],
+ image_info['ramdisk'][1],
+ )
+ bm_utils.write_to_file(pxe_config_file_path, pxe_config)
+
+ macs = self._collect_mac_addresses(context, node)
+ for mac in macs:
+ mac_path = get_pxe_mac_path(mac)
+ bm_utils.unlink_without_raise(mac_path)
+ bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
+
+ def deactivate_bootloader(self, context, node, instance):
+ """Delete PXE bootloader images and config."""
+ try:
+ image_info = get_tftp_image_info(instance)
+ except exception.NovaException:
+ pass
+ else:
+ for label in image_info.keys():
+ (uuid, path) = image_info[label]
+ bm_utils.unlink_without_raise(path)
+
+ bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
+ try:
+ macs = self._collect_mac_addresses(context, node)
+ except exception.DBError:
+ pass
+ else:
+ for mac in macs:
+ bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
+
+ bm_utils.unlink_without_raise(
+ os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+
+ def activate_node(self, context, node, instance):
+ pass
+
+ def deactivate_node(self, context, node, instance):
+ pass
diff --git a/nova/virt/baremetal/pxe_config.template b/nova/virt/baremetal/pxe_config.template
new file mode 100644
index 000000000..f2fcc9b14
--- /dev/null
+++ b/nova/virt/baremetal/pxe_config.template
@@ -0,0 +1,11 @@
+default deploy
+
+label deploy
+kernel ${pxe_options.deployment_aki_path}
+append initrd=${pxe_options.deployment_ari_path} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=${pxe_options.deployment_iscsi_iqn} deployment_id=${pxe_options.deployment_id} deployment_key=${pxe_options.deployment_key} ${pxe_options.pxe_append_params}
+ipappend 3
+
+
+label boot
+kernel ${pxe_options.aki_path}
+append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params}
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
deleted file mode 100644
index c0343bac4..000000000
--- a/nova/virt/baremetal/tilera.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tilera back-end for bare-metal compute node provisioning
-
-The details of this implementation are specific to ISI's testbed. This code
-is provided here as an example of how to implement a backend.
-"""
-
-import base64
-import subprocess
-import time
-
-from nova.compute import power_state
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova import utils
-
-CONF = config.CONF
-
-tilera_opts = [
- cfg.StrOpt('tile_monitor',
- default='/usr/local/TileraMDE/bin/tile-monitor',
- help='Tilera command line program for Bare-metal driver')
- ]
-
-CONF.register_opts(tilera_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
-
-
-class BareMetalNodes(object):
- """
- This manages node information and implements singleton.
-
- BareMetalNodes class handles machine architectures of interest to
- technical computing users have either poor or non-existent support
- for virtualization.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalNodes singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalNodes, cls).__new__(cls)
- return cls._instance
-
- def __init__(self, file_name="/tftpboot/tilera_boards"):
- """
- Only call __init__ the first time object is instantiated.
-
- From the bare-metal node list file: /tftpboot/tilera_boards,
- reads each item of each node such as node ID, IP address,
- MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
- and appends each node information into nodes list.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.nodes = []
- self.BOARD_ID = 0
- self.IP_ADDR = 1
- self.MAC_ADDR = 2
- self.VCPUS = 3
- self.MEMORY_MB = 4
- self.LOCAL_GB = 5
- self.MEMORY_MB_USED = 6
- self.LOCAL_GB_USED = 7
- self.HYPERVISOR_TYPE = 8
- self.HYPERVISOR_VER = 9
- self.CPU_INFO = 10
-
- fp = open(file_name, "r")
- for item in fp:
- l = item.split()
- if l[0] == '#':
- continue
- l_d = {'node_id': int(l[self.BOARD_ID]),
- 'ip_addr': l[self.IP_ADDR],
- 'mac_addr': l[self.MAC_ADDR],
- 'status': power_state.NOSTATE,
- 'vcpus': int(l[self.VCPUS]),
- 'memory_mb': int(l[self.MEMORY_MB]),
- 'local_gb': int(l[self.LOCAL_GB]),
- 'memory_mb_used': int(l[self.MEMORY_MB_USED]),
- 'local_gb_used': int(l[self.LOCAL_GB_USED]),
- 'hypervisor_type': l[self.HYPERVISOR_TYPE],
- 'hypervisor_version': int(l[self.HYPERVISOR_VER]),
- 'cpu_info': l[self.CPU_INFO]}
- self.nodes.append(l_d)
- fp.close()
-
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- for node in self.nodes:
- if node['node_id'] == 9:
- if field == 'vcpus':
- return node['vcpus']
- elif field == 'memory_mb':
- return node['memory_mb']
- elif field == 'local_gb':
- return node['local_gb']
- elif field == 'memory_mb_used':
- return node['memory_mb_used']
- elif field == 'local_gb_used':
- return node['local_gb_used']
- elif field == 'hypervisor_type':
- return node['hypervisor_type']
- elif field == 'hypervisor_version':
- return node['hypervisor_version']
- elif field == 'cpu_info':
- return node['cpu_info']
-
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
-
- Returns 1 if the node is in the nodes list.
- """
- for node in self.nodes:
- if node['node_id'] == node_id:
- node['status'] = status
- return True
- return False
-
- def get_status(self):
- """
- Gets status of the given node.
- """
- pass
-
- def get_idle_node(self):
- """
- Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
- """
- for item in self.nodes:
- if item['status'] == 0:
- item['status'] = 1 # make status RUNNING
- return item['node_id']
- raise exception.NotFound("No free nodes available")
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- for item in self.nodes:
- if item['node_id'] == id:
- return item['ip_addr']
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- LOG.debug(_("free_node...."))
- for item in self.nodes:
- if item['node_id'] == str(node_id):
- item['status'] = 0 # make status IDLE
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
- if node_id < 5:
- pdu_num = 1
- pdu_outlet_num = node_id + 5
- else:
- pdu_num = 2
- pdu_outlet_num = node_id
- path1 = "10.0.100." + str(pdu_num)
- utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
- str(mode), '>>', 'pdu_output')
-
- def deactivate_node(self, node_id):
- """
- Deactivates the given node by turnning it off.
-
- /tftpboot/fs_x directory is a NFS of node#x
- and /tftpboot/root_x file is an file system image of node#x.
- """
- node_ip = self.get_ip_by_id(node_id)
- LOG.debug(_("deactivate_node is called for "
- "node_id = %(id)s node_ip = %(ip)s"),
- {'id': str(node_id), 'ip': node_ip})
- for item in self.nodes:
- if item['node_id'] == node_id:
- LOG.debug(_("status of node is set to 0"))
- item['status'] = 0
- self.power_mgr(node_id, 2)
- self.sleep_mgr(5)
- path = "/tftpboot/fs_" + str(node_id)
- pathx = "/tftpboot/root_" + str(node_id)
- utils.execute('sudo', '/usr/sbin/rpc.mountd')
- try:
- utils.execute('sudo', 'umount', '-f', pathx)
- utils.execute('sudo', 'rm', '-f', pathx)
- except Exception:
- LOG.debug(_("rootfs is already removed"))
-
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
-
- User can access the bare-metal node using ssh.
- """
- cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "ifconfig xgbe0 hw ether " + mac_address +
- " - --wait --run - ifconfig xgbe0 " + ip_address +
- " - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- #utils.execute(cmd, shell=True)
- self.sleep_mgr(5)
-
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
-
- iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
- /tftpboot/iptables_rule script sets iptables rule on the given node.
- """
- if user_data != '':
- open_ip = base64.b64decode(user_data)
- utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)
-
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- LOG.debug(_("Before ping to the bare-metal node"))
- tile_output = "/tftpboot/tile_output_" + str(node_id)
- grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
- tile_output)
- subprocess.Popen(grep_cmd, shell=True)
- self.sleep_mgr(5)
-
- file = open(tile_output, "r")
- out_msg = file.readline().find("Unreachable")
- utils.execute('sudo', 'rm', tile_output)
- if out_msg == -1:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
- locals())
- return True
- else:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
- " out_msg=%(out_msg)s"), locals())
- self.power_mgr(node_id, 2)
- return False
-
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
-
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
- LOG.debug(_("Noting to do for tilera nodes: vmlinux is in CF"))
-
- def sleep_mgr(self, time_in_seconds):
- """
- Sleeps until the node is activated.
- """
- time.sleep(time_in_seconds)
-
- def ssh_set(self, node_ip):
- """
- Sets and Runs sshd in the node.
- """
- cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "/usr/sbin/sshd - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- self.sleep_mgr(5)
-
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address, user_data):
- """
- Activates the given node using ID, IP, and MAC address.
- """
- LOG.debug(_("activate_node"))
-
- self.power_mgr(node_id, 2)
- self.power_mgr(node_id, 3)
- self.sleep_mgr(100)
-
- try:
- self.check_activated(node_id, node_ip)
- self.network_set(node_ip, mac_address, ip_address)
- self.ssh_set(node_ip)
- self.iptables_set(node_ip, user_data)
- return power_state.RUNNING
- except Exception as ex:
- self.deactivate_node(node_id)
- raise exception.NovaException(_("Node is unknown error state."))
-
- def get_console_output(self, console_log, node_id):
- """
- Gets console output of the given node.
- """
- node_ip = self.get_ip_by_id(node_id)
- log_path = "/tftpboot/log_" + str(node_id)
- kmsg_cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip +
- " -- dmesg > " + log_path)
- subprocess.Popen(kmsg_cmd, shell=True)
- self.sleep_mgr(5)
- utils.execute('cp', log_path, console_log)
-
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
-
- Noting to do for tilera nodes: actual image is used.
- """
- path_fs = "/tftpboot/tilera_fs"
- path_root = bp + "/root"
- utils.execute('cp', path_fs, path_root)
-
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
-
- This should be done after ssh key is injected.
- /tftpboot/fs_x directory is a NFS of node#x.
- /tftpboot/root_x file is an file system image of node#x.
- """
- path1 = bpath + "/root"
- pathx = "/tftpboot/root_" + str(node_id)
- path2 = "/tftpboot/fs_" + str(node_id)
- utils.execute('sudo', 'mv', path1, pathx)
- utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py
new file mode 100644
index 000000000..0842ae201
--- /dev/null
+++ b/nova/virt/baremetal/utils.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova.openstack.common import log as logging
+from nova.virt.disk import api as disk_api
+from nova.virt.libvirt import utils as libvirt_utils
+
+LOG = logging.getLogger(__name__)
+
+
+def cache_image(context, target, image_id, user_id, project_id):
+ if not os.path.exists(target):
+ libvirt_utils.fetch_image(context, target, image_id,
+ user_id, project_id)
+
+
+def inject_into_image(image, key, net, metadata, admin_password,
+ files, partition, use_cow=False):
+ try:
+ disk_api.inject_data(image, key, net, metadata, admin_password,
+ files, partition, use_cow)
+ except Exception as e:
+ LOG.warn(_("Failed to inject data into image %(image)s. "
+ "Error: %(e)s") % locals())
+
+
+def unlink_without_raise(path):
+ try:
+ os.unlink(path)
+ except OSError:
+ LOG.exception(_("Failed to unlink %s") % path)
+
+
+def write_to_file(path, contents):
+ with open(path, 'w') as f:
+ f.write(contents)
+
+
+def create_link_without_raise(source, link):
+ try:
+ os.symlink(source, link)
+ except OSError:
+ LOG.exception(_("Failed to create symlink from %(source)s to %(link)s")
+ % locals())
+
+
+def random_alnum(count):
+ import random
+ import string
+ chars = string.ascii_uppercase + string.digits
+ return "".join(random.choice(chars) for _ in range(count))
diff --git a/nova/virt/baremetal/vif_driver.py b/nova/virt/baremetal/vif_driver.py
new file mode 100644
index 000000000..08e68c250
--- /dev/null
+++ b/nova/virt/baremetal/vif_driver.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.baremetal import db as bmdb
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class BareMetalVIFDriver(object):
+
+ def _after_plug(self, instance, network, mapping, pif):
+ pass
+
+ def _after_unplug(self, instance, network, mapping, pif):
+ pass
+
+ def plug(self, instance, vif):
+ LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s")
+ % {'uuid': instance['uuid'], 'vif': vif})
+ network, mapping = vif
+ vif_uuid = mapping['vif_uuid']
+ ctx = context.get_admin_context()
+ node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
+
+ # TODO(deva): optimize this database query
+ # this is just searching for a free physical interface
+ pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
+ for pif in pifs:
+ if not pif['vif_uuid']:
+ bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
+ LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
+ % {'id': pif['id'], 'vif_uuid': vif_uuid})
+ self._after_plug(instance, network, mapping, pif)
+ return
+
+ # NOTE(deva): should this really be raising an exception
+ # when there are no physical interfaces left?
+ raise exception.NovaException(_(
+ "Baremetal node: %(id)s has no available physical interface"
+ " for virtual interface %(vif_uuid)s")
+ % {'id': node['id'], 'vif_uuid': vif_uuid})
+
+ def unplug(self, instance, vif):
+ LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"),
+ {'uuid': instance['uuid'], 'vif': vif})
+ network, mapping = vif
+ vif_uuid = mapping['vif_uuid']
+ ctx = context.get_admin_context()
+ try:
+ pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
+ bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
+ LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
+ % {'id': pif['id'], 'vif_uuid': vif_uuid})
+ self._after_unplug(instance, network, mapping, pif)
+ except exception.NovaException:
+ LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
new file mode 100644
index 000000000..2e6f82b93
--- /dev/null
+++ b/nova/virt/baremetal/volume_driver.py
@@ -0,0 +1,267 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from nova import context as nova_context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import db as bmdb
+from nova.virt.libvirt import utils as libvirt_utils
+
+opts = [
+ cfg.BoolOpt('use_unsafe_iscsi',
+ default=False,
+ help='Do not set this out of dev/test environments. '
+ 'If a node does not have an fixed PXE IP address, '
+ 'volumes are exported with globally opened ACL'),
+ cfg.StrOpt('iscsi_iqn_prefix',
+ default='iqn.2010-10.org.openstack.baremetal',
+ help='iSCSI IQN prefix used in baremetal volume connections.'),
+ ]
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('libvirt_volume_drivers', 'nova.virt.libvirt.driver')
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_baremetal_node_by_instance_uuid(instance_uuid):
+ context = nova_context.get_admin_context()
+ return bmdb.bm_node_get_by_instance_uuid(context, instance_uuid)
+
+
+def _create_iscsi_export_tgtadm(path, tid, iqn):
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'target',
+ '--op', 'new',
+ '--tid', tid,
+ '--targetname', iqn,
+ run_as_root=True)
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'logicalunit',
+ '--op', 'new',
+ '--tid', tid,
+ '--lun', '1',
+ '--backing-store', path,
+ run_as_root=True)
+
+
+def _allow_iscsi_tgtadm(tid, address):
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'target',
+ '--op', 'bind',
+ '--tid', tid,
+ '--initiator-address', address,
+ run_as_root=True)
+
+
+def _delete_iscsi_export_tgtadm(tid):
+ try:
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'logicalunit',
+ '--op', 'delete',
+ '--tid', tid,
+ '--lun', '1',
+ run_as_root=True)
+ except exception.ProcessExecutionError:
+ pass
+ try:
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'target',
+ '--op', 'delete',
+ '--tid', tid,
+ run_as_root=True)
+ except exception.ProcessExecutionError:
+ pass
+ # Check if the tid is deleted, that is, check the tid no longer exists.
+ # If the tid dose not exist, tgtadm returns with exit_code 22.
+ # utils.execute() can check the exit_code if check_exit_code parameter is
+ # passed. But, regardless of whether check_exit_code contains 0 or not,
+ # if the exit_code is 0, the function dose not report errors. So we have to
+ # catch a ProcessExecutionError and test its exit_code is 22.
+ try:
+ utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'target',
+ '--op', 'show',
+ '--tid', tid,
+ run_as_root=True)
+ except exception.ProcessExecutionError as e:
+ if e.exit_code == 22:
+ # OK, the tid is deleted
+ return
+ raise
+ raise exception.NovaException(_(
+ 'baremetal driver was unable to delete tid %s') % tid)
+
+
+def _show_tgtadm():
+ out, _ = utils.execute('tgtadm', '--lld', 'iscsi',
+ '--mode', 'target',
+ '--op', 'show',
+ run_as_root=True)
+ return out
+
+
+def _list_backingstore_path():
+ out = _show_tgtadm()
+ l = []
+ for line in out.split('\n'):
+ m = re.search(r'Backing store path: (.*)$', line)
+ if m:
+ if '/' in m.group(1):
+ l.append(m.group(1))
+ return l
+
+
+def _get_next_tid():
+ out = _show_tgtadm()
+ last_tid = 0
+ for line in out.split('\n'):
+ m = re.search(r'^Target (\d+):', line)
+ if m:
+ tid = int(m.group(1))
+ if last_tid < tid:
+ last_tid = tid
+ return last_tid + 1
+
+
+def _find_tid(iqn):
+ out = _show_tgtadm()
+ pattern = r'^Target (\d+): *' + re.escape(iqn)
+ for line in out.split('\n'):
+ m = re.search(pattern, line)
+ if m:
+ return int(m.group(1))
+ return None
+
+
+def _get_iqn(instance_name, mountpoint):
+ mp = mountpoint.replace('/', '-').strip('-')
+ iqn = '%s:%s-%s' % (CONF.baremetal.iscsi_iqn_prefix,
+ instance_name,
+ mp)
+ return iqn
+
+
+class VolumeDriver(object):
+
+ def __init__(self, virtapi):
+ super(VolumeDriver, self).__init__()
+ self.virtapi = virtapi
+ self._initiator = None
+
+ def get_volume_connector(self, instance):
+ if not self._initiator:
+ self._initiator = libvirt_utils.get_iscsi_initiator()
+ if not self._initiator:
+ LOG.warn(_('Could not determine iscsi initiator name '
+ 'for instance %s') % instance)
+ return {
+ 'ip': CONF.my_ip,
+ 'initiator': self._initiator,
+ 'host': CONF.host,
+ }
+
+ def attach_volume(self, connection_info, instance, mountpoint):
+ raise NotImplementedError()
+
+ def detach_volume(self, connection_info, instance, mountpoint):
+ raise NotImplementedError()
+
+
+class LibvirtVolumeDriver(VolumeDriver):
+ """The VolumeDriver deligates to nova.virt.libvirt.volume."""
+
+ def __init__(self, virtapi):
+ super(LibvirtVolumeDriver, self).__init__(virtapi)
+ self.volume_drivers = {}
+ for driver_str in CONF.libvirt_volume_drivers:
+ driver_type, _sep, driver = driver_str.partition('=')
+ driver_class = importutils.import_class(driver)
+ self.volume_drivers[driver_type] = driver_class(self)
+
+ def _volume_driver_method(self, method_name, connection_info,
+ *args, **kwargs):
+ driver_type = connection_info.get('driver_volume_type')
+ if not driver_type in self.volume_drivers:
+ raise exception.VolumeDriverNotFound(driver_type=driver_type)
+ driver = self.volume_drivers[driver_type]
+ method = getattr(driver, method_name)
+ return method(connection_info, *args, **kwargs)
+
+ def attach_volume(self, connection_info, instance, mountpoint):
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ ctx = nova_context.get_admin_context()
+ pxe_ip = bmdb.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
+ if not pxe_ip:
+ if not CONF.baremetal.use_unsafe_iscsi:
+ raise exception.NovaException(_(
+ 'No fixed PXE IP is associated to %s') % instance['uuid'])
+
+ mount_device = mountpoint.rpartition("/")[2]
+ self._volume_driver_method('connect_volume',
+ connection_info,
+ mount_device)
+ device_path = connection_info['data']['device_path']
+ iqn = _get_iqn(instance['name'], mountpoint)
+ tid = _get_next_tid()
+ _create_iscsi_export_tgtadm(device_path, tid, iqn)
+
+ if pxe_ip:
+ _allow_iscsi_tgtadm(tid, pxe_ip['address'])
+ else:
+ # NOTE(NTTdocomo): Since nova-compute does not know the
+ # instance's initiator ip, it allows any initiators
+ # to connect to the volume. This means other bare-metal
+ # instances that are not attached the volume can connect
+ # to the volume. Do not set CONF.baremetal.use_unsafe_iscsi
+ # out of dev/test environments.
+ # TODO(NTTdocomo): support CHAP
+ _allow_iscsi_tgtadm(tid, 'ALL')
+
+ def detach_volume(self, connection_info, instance, mountpoint):
+ mount_device = mountpoint.rpartition("/")[2]
+ try:
+ iqn = _get_iqn(instance['name'], mountpoint)
+ tid = _find_tid(iqn)
+ if tid is not None:
+ _delete_iscsi_export_tgtadm(tid)
+ else:
+ LOG.warn(_('detach volume could not find tid for %s') % iqn)
+ finally:
+ self._volume_driver_method('disconnect_volume',
+ connection_info,
+ mount_device)
+
+ def get_all_block_devices(self):
+ """
+ Return all block devices in use on this node.
+ """
+ return _list_backingstore_path()
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 0dc11483d..886136460 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -21,9 +21,7 @@ import os
import shutil
import tempfile
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
@@ -46,13 +44,22 @@ configdrive_opts = [
default=None,
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
+ cfg.StrOpt('mkisofs_cmd',
+ default='genisoimage',
+ help='Name and optionally path of the tool used for '
+ 'ISO image creation')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
+# Config drives are 64mb, if we can't size to the exact size of the data
+CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
+
class ConfigDriveBuilder(object):
+ """Build config drives, optionally as a context manager."""
+
def __init__(self, instance_md=None):
self.imagefile = None
@@ -65,6 +72,17 @@ class ConfigDriveBuilder(object):
if instance_md is not None:
self.add_instance_metadata(instance_md)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exctype, excval, exctb):
+ if exctype is not None:
+ # NOTE(mikal): this means we're being cleaned up because an
+ # exception was thrown. All bets are off now, and we should not
+ # swallow the exception
+ return False
+ self.cleanup()
+
def _add_file(self, path, data):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
@@ -79,14 +97,19 @@ class ConfigDriveBuilder(object):
{'filepath': path})
def _make_iso9660(self, path):
- utils.execute('genisoimage',
+ publisher = "%(product)s %(version)s" % {
+ 'product': version.product_string(),
+ 'version': version.version_string_with_package()
+ }
+
+ utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
- '-publisher', ('"OpenStack nova %s"'
- % version.version_string()),
+ '-publisher',
+ publisher,
'-quiet',
'-J',
'-r',
@@ -97,10 +120,9 @@ class ConfigDriveBuilder(object):
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
- # equivalent to genisoimage for vfat filesystems. vfat images are
- # always 64mb.
+ # equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
- f.truncate(64 * 1024 * 1024)
+ f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
@@ -108,20 +130,16 @@ class ConfigDriveBuilder(object):
try:
mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
- _out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir,
+ _out, err = utils.trycmd('mount', '-o',
+ 'loop,uid=%d,gid=%d' % (os.getuid(),
+ os.getgid()),
+ path, mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
- _out, err = utils.trycmd('chown',
- '%s.%s' % (os.getuid(), os.getgid()),
- mountdir, run_as_root=True)
- if err:
- raise exception.ConfigDriveMountFailed(operation='chown',
- error=err)
-
# NOTE(mikal): I can't just use shutils.copytree here, because the
# destination directory already exists. This is annoying.
for ent in os.listdir(self.tempdir):
@@ -134,6 +152,12 @@ class ConfigDriveBuilder(object):
shutil.rmtree(mountdir)
def make_drive(self, path):
+ """Make the config drive.
+
+ :param path: the path to place the config drive image at
+
+ :raises ProcessExecuteError if a helper process has failed.
+ """
if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path)
elif CONF.config_drive_format == 'vfat':
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 529f231af..26fb86f1e 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -32,16 +32,14 @@ import tempfile
if os.name != 'nt':
import crypt
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
-from nova.virt.disk import guestfs
-from nova.virt.disk import loop
-from nova.virt.disk import nbd
+from nova.virt.disk.mount import api as mount
+from nova.virt.disk.vfs import api as vfs
from nova.virt import images
@@ -49,11 +47,8 @@ LOG = logging.getLogger(__name__)
disk_opts = [
cfg.StrOpt('injected_network_template',
- default='$pybasedir/nova/virt/interfaces.template',
+ default=paths.basedir_def('nova/virt/interfaces.template'),
help='Template file for injected network'),
- cfg.ListOpt('img_handlers',
- default=['loop', 'nbd', 'guestfs'],
- help='Order of methods used to mount disk images'),
# NOTE(yamahata): ListOpt won't work because the command may include a
# comma. For example:
@@ -77,7 +72,7 @@ disk_opts = [
'The format is <os_type>=<mkfs command>'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(disk_opts)
_MKFS_COMMAND = {}
@@ -117,7 +112,7 @@ def get_disk_size(path):
def extend(image, size):
- """Increase image to size"""
+ """Increase image to size."""
virt_size = get_disk_size(image)
if virt_size >= size:
return
@@ -129,41 +124,48 @@ def extend(image, size):
def can_resize_fs(image, size, use_cow=False):
"""Check whether we can resize contained file system."""
+ LOG.debug(_('Checking if we can resize image %(image)s. '
+ 'size=%(size)s, CoW=%(use_cow)s'), locals())
+
# Check that we're increasing the size
virt_size = get_disk_size(image)
if virt_size >= size:
+ LOG.debug(_('Cannot resize filesystem %s to a smaller size.'),
+ image)
return False
# Check the image is unpartitioned
if use_cow:
- # Try to mount an unpartitioned qcow2 image
try:
- inject_data(image, use_cow=True)
- except exception.NovaException:
+ fs = vfs.VFS.instance_for_image(image, 'qcow2', None)
+ fs.setup()
+ fs.teardown()
+ except exception.NovaException, e:
+ LOG.debug(_('Unable to mount image %(image)s with '
+ 'error %(error)s. Cannot resize.'),
+ {'image': image,
+ 'error': e})
return False
else:
# For raw, we can directly inspect the file system
try:
utils.execute('e2label', image)
- except exception.ProcessExecutionError:
+ except exception.ProcessExecutionError, e:
+ LOG.debug(_('Unable to determine label for image %(image)s with '
+ 'error %(errror)s. Cannot resize.'),
+ {'image': image,
+ 'error': e})
return False
return True
def bind(src, target, instance_name):
- """Bind device to a filesytem"""
+ """Bind device to a filesystem."""
if src:
utils.execute('touch', target, run_as_root=True)
utils.execute('mount', '-o', 'bind', src, target,
run_as_root=True)
- s = os.stat(src)
- cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
- os.minor(s.st_rdev))
- cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
- "%s/devices.allow" % instance_name)
- utils.execute('tee', cgroups_path,
- process_input=cgroup_info, run_as_root=True)
def unbind(target):
@@ -181,25 +183,14 @@ class _DiskImage(object):
self.image = image
self.partition = partition
self.mount_dir = mount_dir
+ self.use_cow = use_cow
# Internal
self._mkdir = False
self._mounter = None
self._errors = []
- # As a performance tweak, don't bother trying to
- # directly loopback mount a cow image.
- self.handlers = CONF.img_handlers[:]
- if use_cow and 'loop' in self.handlers:
- self.handlers.remove('loop')
-
- if not self.handlers:
- msg = _('no capable image handler configured')
- raise exception.NovaException(msg)
-
if mount_dir:
- # Note the os.path.ismount() shortcut doesn't
- # work with libguestfs due to permissions issues.
device = self._device_for_path(mount_dir)
if device:
self._reset(device)
@@ -218,12 +209,10 @@ class _DiskImage(object):
def _reset(self, device):
"""Reset internal state for a previously mounted directory."""
- mounter_cls = self._handler_class(device=device)
- mounter = mounter_cls(image=self.image,
- partition=self.partition,
- mount_dir=self.mount_dir,
- device=device)
- self._mounter = mounter
+ self._mounter = mount.Mount.instance_for_device(self.image,
+ self.mount_dir,
+ self.partition,
+ device)
mount_name = os.path.basename(self.mount_dir or '')
self._mkdir = mount_name.startswith(self.tmp_prefix)
@@ -233,17 +222,6 @@ class _DiskImage(object):
"""Return the collated errors from all operations."""
return '\n--\n'.join([''] + self._errors)
- @staticmethod
- def _handler_class(mode=None, device=None):
- """Look up the appropriate class to use based on MODE or DEVICE."""
- for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
- if mode and cls.mode == mode:
- return cls
- elif device and cls.device_id_string in device:
- return cls
- msg = _("no disk image handler for: %s") % mode or device
- raise exception.NovaException(msg)
-
def mount(self):
"""Mount a disk image, using the object attributes.
@@ -259,21 +237,19 @@ class _DiskImage(object):
self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix)
self._mkdir = True
- try:
- for h in self.handlers:
- mounter_cls = self._handler_class(h)
- mounter = mounter_cls(image=self.image,
- partition=self.partition,
- mount_dir=self.mount_dir)
- if mounter.do_mount():
- self._mounter = mounter
- break
- else:
- LOG.debug(mounter.error)
- self._errors.append(mounter.error)
- finally:
- if not self._mounter:
- self.umount() # rmdir
+ imgfmt = "raw"
+ if self.use_cow:
+ imgfmt = "qcow2"
+
+ mounter = mount.Mount.instance_for_format(self.image,
+ self.mount_dir,
+ self.partition,
+ imgfmt)
+ if mounter.do_mount():
+ self._mounter = mounter
+ else:
+ LOG.debug(mounter.error)
+ self._errors.append(mounter.error)
return bool(self._mounter)
@@ -282,6 +258,7 @@ class _DiskImage(object):
try:
if self._mounter:
self._mounter.do_umount()
+ self._mounter = None
finally:
if self._mkdir:
os.rmdir(self.mount_dir)
@@ -289,8 +266,7 @@ class _DiskImage(object):
# Public module functions
-def inject_data(image,
- key=None, net=None, metadata=None, admin_password=None,
+def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
files=None, partition=None, use_cow=False):
"""Injects a ssh key and optionally net data into a disk image.
@@ -300,15 +276,20 @@ def inject_data(image,
If partition is not specified it mounts the image as a single partition.
"""
- img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
- if img.mount():
- try:
- inject_data_into_fs(img.mount_dir,
- key, net, metadata, admin_password, files)
- finally:
- img.umount()
- else:
- raise exception.NovaException(img.errors)
+ LOG.debug(_("Inject data image=%(image)s key=%(key)s net=%(net)s "
+ "metadata=%(metadata)s admin_password=ha-ha-not-telling-you "
+ "files=%(files)s partition=%(partition)s use_cow=%(use_cow)s")
+ % locals())
+ fmt = "raw"
+ if use_cow:
+ fmt = "qcow2"
+ fs = vfs.VFS.instance_for_image(image, fmt, partition)
+ fs.setup()
+ try:
+ inject_data_into_fs(fs,
+ key, net, metadata, admin_password, files)
+ finally:
+ fs.teardown()
def setup_container(image, container_dir, use_cow=False):
@@ -326,11 +307,11 @@ def setup_container(image, container_dir, use_cow=False):
raise exception.NovaException(img.errors)
-def destroy_container(container_dir):
- """Destroy the container once it terminates.
+def teardown_container(container_dir):
+ """Teardown the container rootfs mounting once it is spawned.
It will umount the container that is mounted,
- and delete any linked devices.
+ and delete any linked devices.
"""
try:
img = _DiskImage(image=None, mount_dir=container_dir)
@@ -357,72 +338,48 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files):
_inject_file_into_fs(fs, path, contents)
-def _join_and_check_path_within_fs(fs, *args):
- '''os.path.join() with safety check for injected file paths.
-
- Join the supplied path components and make sure that the
- resulting path we are injecting into is within the
- mounted guest fs. Trying to be clever and specifying a
- path with '..' in it will hit this safeguard.
- '''
- absolute_path, _err = utils.execute('readlink', '-nm',
- os.path.join(fs, *args),
- run_as_root=True)
- if not absolute_path.startswith(os.path.realpath(fs) + '/'):
- raise exception.Invalid(_('injected file path not valid'))
- return absolute_path
-
-
def _inject_file_into_fs(fs, path, contents, append=False):
- absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/'))
-
- parent_dir = os.path.dirname(absolute_path)
- utils.execute('mkdir', '-p', parent_dir, run_as_root=True)
-
- args = []
+ LOG.debug(_("Inject file fs=%(fs)s path=%(path)s append=%(append)s") %
+ locals())
if append:
- args.append('-a')
- args.append(absolute_path)
-
- kwargs = dict(process_input=contents, run_as_root=True)
-
- utils.execute('tee', *args, **kwargs)
+ fs.append_file(path, contents)
+ else:
+ fs.replace_file(path, contents)
def _inject_metadata_into_fs(metadata, fs):
+ LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s") %
+ locals())
metadata = dict([(m['key'], m['value']) for m in metadata])
_inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
-def _setup_selinux_for_keys(fs):
+def _setup_selinux_for_keys(fs, sshdir):
"""Get selinux guests to ensure correct context on injected keys."""
- se_cfg = _join_and_check_path_within_fs(fs, 'etc', 'selinux')
- se_cfg, _err = utils.trycmd('readlink', '-e', se_cfg, run_as_root=True)
- if not se_cfg:
+ if not fs.has_file(os.path.join("etc", "selinux")):
return
- rclocal = _join_and_check_path_within_fs(fs, 'etc', 'rc.local')
+ rclocal = os.path.join('etc', 'rc.local')
+ rc_d = os.path.join('etc', 'rc.d')
- # Support systemd based systems
- rc_d = _join_and_check_path_within_fs(fs, 'etc', 'rc.d')
- rclocal_e, _err = utils.trycmd('readlink', '-e', rclocal, run_as_root=True)
- rc_d_e, _err = utils.trycmd('readlink', '-e', rc_d, run_as_root=True)
- if not rclocal_e and rc_d_e:
+ if not fs.has_file(rclocal) and fs.has_file(rc_d):
rclocal = os.path.join(rc_d, 'rc.local')
# Note some systems end rc.local with "exit 0"
# and so to append there you'd need something like:
# utils.execute('sed', '-i', '${/^exit 0$/d}' rclocal, run_as_root=True)
restorecon = [
- '#!/bin/sh\n',
+ '\n',
'# Added by Nova to ensure injected ssh keys have the right context\n',
- 'restorecon -RF /root/.ssh/ 2>/dev/null || :\n',
+ 'restorecon -RF %s 2>/dev/null || :\n' % sshdir,
]
- rclocal_rel = os.path.relpath(rclocal, fs)
- _inject_file_into_fs(fs, rclocal_rel, ''.join(restorecon), append=True)
- utils.execute('chmod', 'a+x', rclocal, run_as_root=True)
+ if not fs.has_file(rclocal):
+ restorecon.insert(0, '#!/bin/sh')
+
+ _inject_file_into_fs(fs, rclocal, ''.join(restorecon), append=True)
+ fs.set_permissions(rclocal, 0700)
def _inject_key_into_fs(key, fs):
@@ -431,12 +388,15 @@ def _inject_key_into_fs(key, fs):
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
- sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh')
- utils.execute('mkdir', '-p', sshdir, run_as_root=True)
- utils.execute('chown', 'root', sshdir, run_as_root=True)
- utils.execute('chmod', '700', sshdir, run_as_root=True)
- keyfile = os.path.join('root', '.ssh', 'authorized_keys')
+ LOG.debug(_("Inject key fs=%(fs)s key=%(key)s") %
+ locals())
+ sshdir = os.path.join('root', '.ssh')
+ fs.make_path(sshdir)
+ fs.set_ownership(sshdir, "root", "root")
+ fs.set_permissions(sshdir, 0700)
+
+ keyfile = os.path.join(sshdir, 'authorized_keys')
key_data = ''.join([
'\n',
@@ -448,7 +408,7 @@ def _inject_key_into_fs(key, fs):
_inject_file_into_fs(fs, keyfile, key_data, append=True)
- _setup_selinux_for_keys(fs)
+ _setup_selinux_for_keys(fs, sshdir)
def _inject_net_into_fs(net, fs):
@@ -456,10 +416,13 @@ def _inject_net_into_fs(net, fs):
net is the contents of /etc/network/interfaces.
"""
- netdir = _join_and_check_path_within_fs(fs, 'etc', 'network')
- utils.execute('mkdir', '-p', netdir, run_as_root=True)
- utils.execute('chown', 'root:root', netdir, run_as_root=True)
- utils.execute('chmod', 755, netdir, run_as_root=True)
+
+ LOG.debug(_("Inject key fs=%(fs)s net=%(net)s") %
+ locals())
+ netdir = os.path.join('etc', 'network')
+ fs.make_path(netdir)
+ fs.set_ownership(netdir, "root", "root")
+ fs.set_permissions(netdir, 0744)
netfile = os.path.join('etc', 'network', 'interfaces')
_inject_file_into_fs(fs, netfile, net)
@@ -480,6 +443,9 @@ def _inject_admin_password_into_fs(admin_passwd, fs):
# files from the instance filesystem to local files, make any
# necessary changes, and then copy them back.
+ LOG.debug(_("Inject admin password fs=%(fs)s "
+ "admin_passwd=ha-ha-not-telling-you") %
+ locals())
admin_user = 'root'
fd, tmp_passwd = tempfile.mkstemp()
@@ -487,19 +453,27 @@ def _inject_admin_password_into_fs(admin_passwd, fs):
fd, tmp_shadow = tempfile.mkstemp()
os.close(fd)
- passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd')
- shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow')
+ passwd_path = os.path.join('etc', 'passwd')
+ shadow_path = os.path.join('etc', 'shadow')
+
+ passwd_data = fs.read_file(passwd_path)
+ shadow_data = fs.read_file(shadow_path)
+
+ new_shadow_data = _set_passwd(admin_user, admin_passwd,
+ passwd_data, shadow_data)
- utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True)
- utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True)
- _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
- utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True)
- os.unlink(tmp_passwd)
- utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True)
- os.unlink(tmp_shadow)
+ fs.replace_file(shadow_path, new_shadow_data)
-def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
+def _generate_salt():
+ salt_set = ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ '0123456789./')
+ salt = 16 * ' '
+ return ''.join([random.choice(salt_set) for c in salt])
+
+
+def _set_passwd(username, admin_passwd, passwd_data, shadow_data):
"""set the password for username to admin_passwd
The passwd_file is not modified. The shadow_file is updated.
@@ -516,14 +490,10 @@ def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
if os.name == 'nt':
raise exception.NovaException(_('Not implemented on Windows'))
- salt_set = ('abcdefghijklmnopqrstuvwxyz'
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
- '0123456789./')
# encryption algo - id pairs for crypt()
algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}
- salt = 16 * ' '
- salt = ''.join([random.choice(salt_set) for c in salt])
+ salt = _generate_salt()
# crypt() depends on the underlying libc, and may not support all
# forms of hash. We try md5 first. If we get only 13 characters back,
@@ -536,39 +506,34 @@ def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
if len(encrypted_passwd) == 13:
encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)
- try:
- p_file = open(passwd_file, 'rb')
- s_file = open(shadow_file, 'rb')
-
- # username MUST exist in passwd file or it's an error
- found = False
- for entry in p_file:
- split_entry = entry.split(':')
- if split_entry[0] == username:
- found = True
- break
- if not found:
- msg = _('User %(username)s not found in password file.')
- raise exception.NovaException(msg % username)
-
- # update password in the shadow file.It's an error if the
- # the user doesn't exist.
- new_shadow = list()
- found = False
- for entry in s_file:
- split_entry = entry.split(':')
- if split_entry[0] == username:
- split_entry[1] = encrypted_passwd
- found = True
- new_entry = ':'.join(split_entry)
- new_shadow.append(new_entry)
- s_file.close()
- if not found:
- msg = _('User %(username)s not found in shadow file.')
- raise exception.NovaException(msg % username)
- s_file = open(shadow_file, 'wb')
- for entry in new_shadow:
- s_file.write(entry)
- finally:
- p_file.close()
- s_file.close()
+ p_file = passwd_data.split("\n")
+ s_file = shadow_data.split("\n")
+
+ # username MUST exist in passwd file or it's an error
+ found = False
+ for entry in p_file:
+ split_entry = entry.split(':')
+ if split_entry[0] == username:
+ found = True
+ break
+ if not found:
+ msg = _('User %(username)s not found in password file.')
+ raise exception.NovaException(msg % username)
+
+ # update password in the shadow file.It's an error if the
+ # the user doesn't exist.
+ new_shadow = list()
+ found = False
+ for entry in s_file:
+ split_entry = entry.split(':')
+ if split_entry[0] == username:
+ split_entry[1] = encrypted_passwd
+ found = True
+ new_entry = ':'.join(split_entry)
+ new_shadow.append(new_entry)
+
+ if not found:
+ msg = _('User %(username)s not found in shadow file.')
+ raise exception.NovaException(msg % username)
+
+ return "\n".join(new_shadow)
diff --git a/nova/virt/disk/guestfs.py b/nova/virt/disk/guestfs.py
deleted file mode 100644
index 21e33b1d7..000000000
--- a/nova/virt/disk/guestfs.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Support for mounting images with libguestfs"""
-
-import os
-
-from nova import exception
-from nova import utils
-from nova.virt.disk import mount
-
-
-class Mount(mount.Mount):
- """libguestfs support for arbitrary images."""
- mode = 'guestfs'
- device_id_string = 'guest'
-
- def map_dev(self):
- self.mapped = True
- return True
-
- def unmap_dev(self):
- self.mapped = False
-
- def mnt_dev(self):
- try:
- partition = int(self.partition or 0)
- except ValueError:
- self.error = _('unsupported partition: %s') % self.partition
- return False
-
- args = ('guestmount', '--rw', '-a', self.image)
- if partition == -1:
- args += ('-i',) # find the OS partition
- elif partition:
- args += ('-m', '/dev/sda%d' % partition)
- else:
- # We don't resort to -i for this case yet,
- # as some older versions of libguestfs
- # have problems identifying ttylinux images for example
- args += ('-m', '/dev/sda')
- args += (self.mount_dir,)
- # root access should not required for guestfs (if the user
- # has permissions to fusermount (by being part of the fuse
- # group for example)). Also note the image and mount_dir
- # have appropriate creditials at this point for read/write
- # mounting by the nova user. However currently there are
- # subsequent access issues by both the nova and root users
- # if the nova user mounts the image, as detailed here:
- # https://bugzilla.redhat.com/show_bug.cgi?id=765814
- _out, err = utils.trycmd(*args, discard_warnings=True,
- run_as_root=True)
- if err:
- self.error = _('Failed to mount filesystem: %s') % err
- # Be defensive and ensure this is unmounted,
- # as I'm not sure guestmount will never have
- # mounted when it returns EXIT_FAILURE.
- # This is required if discard_warnings=False above
- utils.trycmd('fusermount', '-u', self.mount_dir, run_as_root=True)
- return False
-
- # More defensiveness as there are edge cases where
- # guestmount can return success while not mounting
- try:
- if not os.listdir(self.mount_dir):
- # Assume we've just got the original empty temp dir
- err = _('unknown guestmount error')
- self.error = _('Failed to mount filesystem: %s') % err
- return False
- except OSError:
- # This is the usual path and means root has
- # probably mounted fine
- pass
-
- self.mounted = True
- return True
-
- def unmnt_dev(self):
- if not self.mounted:
- return
- umount_cmd = ['fusermount', '-u', self.mount_dir]
- try:
- # We make a few attempts to work around other
- # processes temporarily scanning the mount_dir etc.
- utils.execute(*umount_cmd, attempts=5, run_as_root=True)
- except exception.ProcessExecutionError:
- # If we still can't umount, then do a lazy umount
- # (in the background), so that mounts might eventually
- # be cleaned up. Note we'll wait 10s below for the umount to
- # complete, after which we'll raise an exception.
- umount_cmd.insert(1, '-z')
- utils.execute(*umount_cmd, run_as_root=True)
-
- # Unfortunately FUSE has an issue where it doesn't wait
- # for processes associated with the mount to terminate.
- # Therefore we do this manually here. Note later versions
- # of guestmount have the --pid-file option to help with this.
- # Here we check every .2 seconds whether guestmount is finished
- # but do this for at most 10 seconds.
- wait_cmd = 'until ! ps -C guestmount -o args= | grep -qF "%s"; '
- wait_cmd += 'do sleep .2; done'
- wait_cmd %= self.mount_dir
- try:
- utils.execute('timeout', '10s', 'sh', '-c', wait_cmd)
- self.mounted = False
- except exception.ProcessExecutionError:
- msg = _("Failed to umount image at %s, guestmount was "
- "still running after 10s") % (self.mount_dir)
- raise exception.NovaException(msg)
diff --git a/nova/virt/disk/mount/__init__.py b/nova/virt/disk/mount/__init__.py
new file mode 100644
index 000000000..5c18da32d
--- /dev/null
+++ b/nova/virt/disk/mount/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Support for mounting disk images on the host filesystem
+
+"""
diff --git a/nova/virt/disk/mount.py b/nova/virt/disk/mount/api.py
index e683658d2..4de9d9c77 100644
--- a/nova/virt/disk/mount.py
+++ b/nova/virt/disk/mount/api.py
@@ -13,15 +13,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting virtual image files"""
+"""Support for mounting virtual image files."""
import os
+import time
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
+MAX_DEVICE_WAIT = 30
+
class Mount(object):
"""Standard mounting operations, that can be overridden by subclasses.
@@ -30,7 +34,39 @@ class Mount(object):
to be called in that order.
"""
- mode = device_id_string = None # to be overridden in subclasses
+ mode = None # to be overridden in subclasses
+
+ @staticmethod
+ def instance_for_format(imgfile, mountdir, partition, imgfmt):
+ LOG.debug(_("Instance for format imgfile=%(imgfile)s "
+ "mountdir=%(mountdir)s partition=%(partition)s "
+ "imgfmt=%(imgfmt)s") % locals())
+ if imgfmt == "raw":
+ LOG.debug(_("Using LoopMount"))
+ return importutils.import_object(
+ "nova.virt.disk.mount.loop.LoopMount",
+ imgfile, mountdir, partition)
+ else:
+ LOG.debug(_("Using NbdMount"))
+ return importutils.import_object(
+ "nova.virt.disk.mount.nbd.NbdMount",
+ imgfile, mountdir, partition)
+
+ @staticmethod
+ def instance_for_device(imgfile, mountdir, partition, device):
+ LOG.debug(_("Instance for device imgfile=%(imgfile)s "
+ "mountdir=%(mountdir)s partition=%(partition)s "
+ "device=%(device)s") % locals())
+ if "loop" in device:
+ LOG.debug(_("Using LoopMount"))
+ return importutils.import_object(
+ "nova.virt.disk.mount.loop.LoopMount",
+ imgfile, mountdir, partition, device)
+ else:
+ LOG.debug(_("Using NbdMount"))
+ return importutils.import_object(
+ "nova.virt.disk.mount.nbd.NbdMount",
+ imgfile, mountdir, partition, device)
def __init__(self, image, mount_dir, partition=None, device=None):
@@ -69,6 +105,26 @@ class Mount(object):
self.linked = True
return True
+ def _get_dev_retry_helper(self):
+ """Some implementations need to retry their get_dev."""
+ # NOTE(mikal): This method helps implement retries. The implementation
+ # simply calls _get_dev_retry_helper from their get_dev, and implements
+ # _inner_get_dev with their device acquisition logic. The NBD
+ # implementation has an example.
+ start_time = time.time()
+ device = self._inner_get_dev()
+ while not device:
+ LOG.info(_('Device allocation failed. Will retry in 2 seconds.'))
+ time.sleep(2)
+ if time.time() - start_time > MAX_DEVICE_WAIT:
+ LOG.warn(_('Device allocation failed after repeated retries.'))
+ return False
+ device = self._inner_get_dev()
+ return True
+
+ def _inner_get_dev(self):
+ raise NotImplementedError()
+
def unget_dev(self):
"""Release the block device from the file system namespace."""
self.linked = False
@@ -76,6 +132,7 @@ class Mount(object):
def map_dev(self):
"""Map partitions of the device to the file system namespace."""
assert(os.path.exists(self.device))
+ LOG.debug(_("Map dev %s"), self.device)
automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
self.partition)
@@ -119,6 +176,7 @@ class Mount(object):
"""Remove partitions of the device from the file system namespace."""
if not self.mapped:
return
+ LOG.debug(_("Unmap dev %s"), self.device)
if self.partition and not self.automapped:
utils.execute('kpartx', '-d', self.device, run_as_root=True)
self.mapped = False
@@ -126,6 +184,8 @@ class Mount(object):
def mnt_dev(self):
"""Mount the device into the file system."""
+ LOG.debug(_("Mount %(dev)s on %(dir)s") %
+ {'dev': self.mapped_device, 'dir': self.mount_dir})
_out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
run_as_root=True)
if err:
@@ -139,6 +199,7 @@ class Mount(object):
"""Unmount the device from the file system."""
if not self.mounted:
return
+ LOG.debug(_("Umount %s") % self.mapped_device)
utils.execute('umount', self.mapped_device, run_as_root=True)
self.mounted = False
@@ -149,6 +210,7 @@ class Mount(object):
status = self.get_dev() and self.map_dev() and self.mnt_dev()
finally:
if not status:
+ LOG.debug(_("Fail to mount, tearing back down"))
self.do_umount()
return status
diff --git a/nova/virt/disk/loop.py b/nova/virt/disk/mount/loop.py
index 3dfdc32d3..366d34715 100644
--- a/nova/virt/disk/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -13,30 +13,50 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with the loop device"""
+"""Support for mounting images with the loop device."""
+from nova.openstack.common import log as logging
from nova import utils
-from nova.virt.disk import mount
+from nova.virt.disk.mount import api
+LOG = logging.getLogger(__name__)
-class Mount(mount.Mount):
+
+class LoopMount(api.Mount):
"""loop back support for raw images."""
mode = 'loop'
- device_id_string = mode
- def get_dev(self):
+ def _inner_get_dev(self):
out, err = utils.trycmd('losetup', '--find', '--show', self.image,
run_as_root=True)
if err:
self.error = _('Could not attach image to loopback: %s') % err
+ LOG.info(_('Loop mount error: %s'), self.error)
+ self.linked = False
+ self.device = None
return False
self.device = out.strip()
+ LOG.debug(_("Got loop device %s"), self.device)
self.linked = True
return True
+ def get_dev(self):
+ # NOTE(mikal): the retry is required here in case we are low on loop
+ # devices. Note however that modern kernels will use more loop devices
+ # if they exist. If you're seeing lots of retries, consider adding
+ # more devices.
+ return self._get_dev_retry_helper()
+
def unget_dev(self):
if not self.linked:
return
- utils.execute('losetup', '--detach', self.device, run_as_root=True)
+
+ # NOTE(mikal): On some kernels, losetup -d will intermittently fail,
+ # thus leaking a loop device unless the losetup --detach is retried:
+ # https://lkml.org/lkml/2012/9/28/62
+ LOG.debug(_("Release loop device %s"), self.device)
+ utils.execute('losetup', '--detach', self.device, run_as_root=True,
+ attempts=3)
self.linked = False
+ self.device = None
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
new file mode 100644
index 000000000..72302fb91
--- /dev/null
+++ b/nova/virt/disk/mount/nbd.py
@@ -0,0 +1,128 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Support for mounting images with qemu-nbd."""
+
+import os
+import random
+import re
+import time
+
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.disk.mount import api
+
+LOG = logging.getLogger(__name__)
+
+nbd_opts = [
+ cfg.IntOpt('timeout_nbd',
+ default=10,
+ help='time to wait for a NBD device coming up'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(nbd_opts)
+
+NBD_DEVICE_RE = re.compile('nbd[0-9]+')
+
+
+class NbdMount(api.Mount):
+ """qemu-nbd support disk images."""
+ mode = 'nbd'
+
+ def _detect_nbd_devices(self):
+ """Detect nbd device files."""
+ return filter(NBD_DEVICE_RE.match, os.listdir('/sys/block/'))
+
+ def _find_unused(self, devices):
+ for device in devices:
+ if not os.path.exists(os.path.join('/sys/block/', device, 'pid')):
+ return device
+ LOG.warn(_('No free nbd devices'))
+ return None
+
+ def _allocate_nbd(self):
+ if not os.path.exists('/sys/block/nbd0'):
+ LOG.error(_('nbd module not loaded'))
+ self.error = _('nbd unavailable: module not loaded')
+ return None
+
+ devices = self._detect_nbd_devices()
+ random.shuffle(devices)
+ device = self._find_unused(devices)
+ if not device:
+ # really want to log this info, not raise
+ self.error = _('No free nbd devices')
+ return None
+ return os.path.join('/dev', device)
+
+ def _read_pid_file(self, pidfile):
+ # This is for unit test convenience
+ with open(pidfile) as f:
+ pid = int(f.readline())
+ return pid
+
+ def _inner_get_dev(self):
+ device = self._allocate_nbd()
+ if not device:
+ return False
+
+ # NOTE(mikal): qemu-nbd will return an error if the device file is
+ # already in use.
+ LOG.debug(_('Get nbd device %(dev)s for %(imgfile)s'),
+ {'dev': device, 'imgfile': self.image})
+ _out, err = utils.trycmd('qemu-nbd', '-c', device, self.image,
+ run_as_root=True)
+ if err:
+ self.error = _('qemu-nbd error: %s') % err
+ LOG.info(_('NBD mount error: %s'), self.error)
+ return False
+
+ # NOTE(vish): this forks into another process, so give it a chance
+ # to set up before continuing
+ pidfile = "/sys/block/%s/pid" % os.path.basename(device)
+ for _i in range(CONF.timeout_nbd):
+ if os.path.exists(pidfile):
+ self.device = device
+ break
+ time.sleep(1)
+ else:
+ self.error = _('nbd device %s did not show up') % device
+ LOG.info(_('NBD mount error: %s'), self.error)
+
+ # Cleanup
+ _out, err = utils.trycmd('qemu-nbd', '-d', device,
+ run_as_root=True)
+ if err:
+ LOG.warn(_('Detaching from erroneous nbd device returned '
+ 'error: %s'), err)
+ return False
+
+ self.error = ''
+ self.linked = True
+ return True
+
+ def get_dev(self):
+ """Retry requests for NBD devices."""
+ return self._get_dev_retry_helper()
+
+ def unget_dev(self):
+ if not self.linked:
+ return
+ LOG.debug(_('Release nbd device %s'), self.device)
+ utils.execute('qemu-nbd', '-d', self.device, run_as_root=True)
+ self.linked = False
+ self.device = None
diff --git a/nova/virt/disk/nbd.py b/nova/virt/disk/nbd.py
deleted file mode 100644
index 8503273a6..000000000
--- a/nova/virt/disk/nbd.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Support for mounting images with qemu-nbd"""
-
-import os
-import time
-
-from nova import config
-from nova import flags
-from nova.openstack.common import cfg
-from nova import utils
-from nova.virt.disk import mount
-
-
-nbd_opts = [
- cfg.IntOpt('timeout_nbd',
- default=10,
- help='time to wait for a NBD device coming up'),
- cfg.IntOpt('max_nbd_devices',
- default=16,
- help='maximum number of possible nbd devices'),
- ]
-
-CONF = config.CONF
-CONF.register_opts(nbd_opts)
-
-
-class Mount(mount.Mount):
- """qemu-nbd support disk images."""
- mode = 'nbd'
- device_id_string = mode
-
- # NOTE(padraig): There are three issues with this nbd device handling
- # 1. max_nbd_devices should be inferred (#861504)
- # 2. We assume nothing else on the system uses nbd devices
- # 3. Multiple workers on a system can race against each other
- # A patch has been proposed in Nov 2011, to add add a -f option to
- # qemu-nbd, akin to losetup -f. One could test for this by running qemu-nbd
- # with just the -f option, where it will fail if not supported, or if there
- # are no free devices. Note that patch currently hardcodes 16 devices.
- # We might be able to alleviate problem 2. by scanning /proc/partitions
- # like the aformentioned patch does.
- _DEVICES = ['/dev/nbd%s' % i for i in range(CONF.max_nbd_devices)]
-
- def _allocate_nbd(self):
- if not os.path.exists("/sys/block/nbd0"):
- self.error = _('nbd unavailable: module not loaded')
- return None
- while True:
- if not self._DEVICES:
- # really want to log this info, not raise
- self.error = _('No free nbd devices')
- return None
- device = self._DEVICES.pop()
- if not os.path.exists("/sys/block/%s/pid" %
- os.path.basename(device)):
- break
- return device
-
- def _free_nbd(self, device):
- # The device could already be present if unget_dev
- # is called right after a nova restart
- # (when destroying an LXC container for example).
- if not device in self._DEVICES:
- self._DEVICES.append(device)
-
- def get_dev(self):
- device = self._allocate_nbd()
- if not device:
- return False
- _out, err = utils.trycmd('qemu-nbd', '-c', device, self.image,
- run_as_root=True)
- if err:
- self.error = _('qemu-nbd error: %s') % err
- self._free_nbd(device)
- return False
-
- # NOTE(vish): this forks into another process, so give it a chance
- # to set up before continuing
- for _i in range(CONF.timeout_nbd):
- if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
- self.device = device
- break
- time.sleep(1)
- else:
- self.error = _('nbd device %s did not show up') % device
- self._free_nbd(device)
- return False
-
- self.linked = True
- return True
-
- def unget_dev(self):
- if not self.linked:
- return
- utils.execute('qemu-nbd', '-d', self.device, run_as_root=True)
- self._free_nbd(self.device)
- self.linked = False
- self.device = None
diff --git a/nova/virt/disk/vfs/__init__.py b/nova/virt/disk/vfs/__init__.py
new file mode 100644
index 000000000..880979c48
--- /dev/null
+++ b/nova/virt/disk/vfs/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Operations on virtual filesystems
+
+"""
diff --git a/nova/virt/disk/vfs/api.py b/nova/virt/disk/vfs/api.py
new file mode 100644
index 000000000..445752d9c
--- /dev/null
+++ b/nova/virt/disk/vfs/api.py
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class VFS(object):
+
+ @staticmethod
+ def instance_for_image(imgfile, imgfmt, partition):
+ LOG.debug(_("Instance for image imgfile=%(imgfile)s "
+ "imgfmt=%(imgfmt)s partition=%(partition)s")
+ % locals())
+ hasGuestfs = False
+ try:
+ LOG.debug(_("Trying to import guestfs"))
+ importutils.import_module("guestfs")
+ hasGuestfs = True
+ except Exception:
+ pass
+
+ if hasGuestfs:
+ LOG.debug(_("Using primary VFSGuestFS"))
+ return importutils.import_object(
+ "nova.virt.disk.vfs.guestfs.VFSGuestFS",
+ imgfile, imgfmt, partition)
+ else:
+ LOG.debug(_("Falling back to VFSLocalFS"))
+ return importutils.import_object(
+ "nova.virt.disk.vfs.localfs.VFSLocalFS",
+ imgfile, imgfmt, partition)
+
+ """
+ The VFS class defines an interface for manipulating files within
+ a virtual disk image filesystem. This allows file injection code
+ to avoid the assumption that the virtual disk image can be mounted
+ in the host filesystem.
+
+ All paths provided to the APIs in this class should be relative
+ to the root of the virtual disk image filesystem. Subclasses
+ will translate paths as required by their implementation.
+ """
+ def __init__(self, imgfile, imgfmt, partition):
+ self.imgfile = imgfile
+ self.imgfmt = imgfmt
+ self.partition = partition
+
+ """
+ Perform any one-time setup tasks to make the virtual
+ filesystem available to future API calls
+ """
+ def setup(self):
+ pass
+
+ """
+ Release all resources initialized in the setup method
+ """
+ def teardown(self):
+ pass
+
+ """
+ Create a directory @path, including all intermedia
+ path components if they do not already exist
+ """
+ def make_path(self, path):
+ pass
+
+ """
+ Append @content to the end of the file identified
+ by @path, creating the file if it does not already
+ exist
+ """
+ def append_file(self, path, content):
+ pass
+
+ """
+ Replace the entire contents of the file identified
+ by @path, with @content, creating the file if it does
+ not already exist
+ """
+ def replace_file(self, path, content):
+ pass
+
+ """
+ Return the entire contents of the file identified
+ by @path
+ """
+ def read_file(self, path):
+ pass
+
+ """
+ Return a True if the file identified by @path
+ exists
+ """
+ def has_file(self, path):
+ pass
+
+ """
+ Set the permissions on the file identified by
+ @path to @mode. The file must exist prior to
+ this call.
+ """
+ def set_permissions(self, path, mode):
+ pass
+
+ """
+ Set the ownership on the file identified by
+ @path to the username @user and groupname @group.
+ Either of @user or @group may be None, in which case
+ the current ownership will be left unchanged. The
+ ownership must be passed in string form, allowing
+ subclasses to translate to uid/gid form as required.
+ The file must exist prior to this call.
+ """
+ def set_ownership(self, path, user, group):
+ pass
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
new file mode 100644
index 000000000..acea8afdf
--- /dev/null
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -0,0 +1,196 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import guestfs
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.disk.vfs import api as vfs
+
+LOG = logging.getLogger(__name__)
+
+guestfs = None
+
+
+class VFSGuestFS(vfs.VFS):
+
+ """
+ This class implements a VFS module that uses the libguestfs APIs
+ to access the disk image. The disk image is never mapped into
+ the host filesystem, thus avoiding any potential for symlink
+ attacks from the guest filesystem.
+ """
+ def __init__(self, imgfile, imgfmt='raw', partition=None):
+ super(VFSGuestFS, self).__init__(imgfile, imgfmt, partition)
+
+ global guestfs
+ if guestfs is None:
+ guestfs = __import__('guestfs')
+
+ self.handle = None
+
+ def setup_os(self):
+ if self.partition == -1:
+ self.setup_os_inspect()
+ else:
+ self.setup_os_static()
+
+ def setup_os_static(self):
+ LOG.debug(_("Mount guest OS image %(imgfile)s partition %(part)s"),
+ {'imgfile': self.imgfile, 'part': str(self.partition)})
+
+ if self.partition:
+ self.handle.mount_options("", "/dev/sda%d" % self.partition, "/")
+ else:
+ self.handle.mount_options("", "/dev/sda", "/")
+
+ def setup_os_inspect(self):
+ LOG.debug(_("Inspecting guest OS image %s"), self.imgfile)
+ roots = self.handle.inspect_os()
+
+ if len(roots) == 0:
+ raise exception.NovaException(_("No operating system found in %s"),
+ self.imgfile)
+
+ if len(roots) != 1:
+ LOG.debug(_("Multi-boot OS %(roots)s") % {'roots': str(roots)})
+ raise exception.NovaException(
+ _("Multi-boot operating system found in %s"),
+ self.imgfile)
+
+ self.setup_os_root(roots[0])
+
+ def setup_os_root(self, root):
+ LOG.debug(_("Inspecting guest OS root filesystem %s"), root)
+ mounts = self.handle.inspect_get_mountpoints(root)
+
+ if len(mounts) == 0:
+ raise exception.NovaException(
+ _("No mount points found in %(root)s of %(imgfile)s") %
+ {'root': root, 'imgfile': self.imgfile})
+
+ mounts.sort(key=lambda mount: mount[1])
+ for mount in mounts:
+ LOG.debug(_("Mounting %(dev)s at %(dir)s") %
+ {'dev': mount[1], 'dir': mount[0]})
+ self.handle.mount_options("", mount[1], mount[0])
+
+ def setup(self):
+ LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
+ {'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
+ self.handle = guestfs.GuestFS()
+
+ try:
+ self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
+ self.handle.launch()
+
+ self.setup_os()
+
+ self.handle.aug_init("/", 0)
+ except RuntimeError, e:
+ # dereference object and implicitly close()
+ self.handle = None
+ raise exception.NovaException(
+ _("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
+ {'imgfile': self.imgfile, 'e': e})
+ except Exception:
+ self.handle = None
+ raise
+
+ def teardown(self):
+ LOG.debug(_("Tearing down appliance"))
+
+ try:
+ try:
+ self.handle.aug_close()
+ except RuntimeError, e:
+ LOG.warn(_("Failed to close augeas %s"), e)
+
+ try:
+ self.handle.shutdown()
+ except AttributeError:
+ # Older libguestfs versions haven't an explicit shutdown
+ pass
+ except RuntimeError, e:
+ LOG.warn(_("Failed to shutdown appliance %s"), e)
+
+ try:
+ self.handle.close()
+ except AttributeError:
+ # Older libguestfs versions haven't an explicit close
+ pass
+ except RuntimeError, e:
+ LOG.warn(_("Failed to close guest handle %s"), e)
+ finally:
+ # dereference object and implicitly close()
+ self.handle = None
+
+ @staticmethod
+ def _canonicalize_path(path):
+ if path[0] != '/':
+ return '/' + path
+ return path
+
+ def make_path(self, path):
+ LOG.debug(_("Make directory path=%(path)s") % locals())
+ path = self._canonicalize_path(path)
+ self.handle.mkdir_p(path)
+
+ def append_file(self, path, content):
+ LOG.debug(_("Append file path=%(path)s") % locals())
+ path = self._canonicalize_path(path)
+ self.handle.write_append(path, content)
+
+ def replace_file(self, path, content):
+ LOG.debug(_("Replace file path=%(path)s") % locals())
+ path = self._canonicalize_path(path)
+ self.handle.write(path, content)
+
+ def read_file(self, path):
+ LOG.debug(_("Read file path=%(path)s") % locals())
+ path = self._canonicalize_path(path)
+ return self.handle.read_file(path)
+
+ def has_file(self, path):
+ LOG.debug(_("Has file path=%(path)s") % locals())
+ path = self._canonicalize_path(path)
+ try:
+ self.handle.stat(path)
+ return True
+ except RuntimeError:
+ return False
+
+ def set_permissions(self, path, mode):
+ LOG.debug(_("Set permissions path=%(path)s mode=%(mode)s") % locals())
+ path = self._canonicalize_path(path)
+ self.handle.chmod(mode, path)
+
+ def set_ownership(self, path, user, group):
+ LOG.debug(_("Set ownership path=%(path)s "
+ "user=%(user)s group=%(group)s") % locals())
+ path = self._canonicalize_path(path)
+ uid = -1
+ gid = -1
+
+ if user is not None:
+ uid = int(self.handle.aug_get(
+ "/files/etc/passwd/" + user + "/uid"))
+ if group is not None:
+ gid = int(self.handle.aug_get(
+ "/files/etc/group/" + group + "/gid"))
+
+ LOG.debug(_("chown uid=%(uid)d gid=%(gid)s") % locals())
+ self.handle.chown(uid, gid, path)
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
new file mode 100644
index 000000000..9efa6798b
--- /dev/null
+++ b/nova/virt/disk/vfs/localfs.py
@@ -0,0 +1,158 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.disk.mount import loop
+from nova.virt.disk.mount import nbd
+from nova.virt.disk.vfs import api as vfs
+
+LOG = logging.getLogger(__name__)
+
+
+class VFSLocalFS(vfs.VFS):
+
+ """
+ os.path.join() with safety check for injected file paths.
+
+ Join the supplied path components and make sure that the
+ resulting path we are injecting into is within the
+ mounted guest fs. Trying to be clever and specifying a
+ path with '..' in it will hit this safeguard.
+ """
+ def _canonical_path(self, path):
+ canonpath, _err = utils.execute(
+ 'readlink', '-nm',
+ os.path.join(self.imgdir, path.lstrip("/")),
+ run_as_root=True)
+ if not canonpath.startswith(os.path.realpath(self.imgdir) + '/'):
+ raise exception.Invalid(_('File path %s not valid') % path)
+ return canonpath
+
+ """
+ This class implements a VFS module that is mapped to a virtual
+ root directory present on the host filesystem. This implementation
+ uses the nova.virt.disk.mount.Mount API to make virtual disk
+ images visible in the host filesystem. If the disk format is
+ raw, it will use the loopback mount impl, otherwise it will
+ use the qemu-nbd impl.
+ """
+ def __init__(self, imgfile, imgfmt="raw", partition=None, imgdir=None):
+ super(VFSLocalFS, self).__init__(imgfile, imgfmt, partition)
+
+ self.imgdir = imgdir
+ self.mount = None
+
+ def setup(self):
+ self.imgdir = tempfile.mkdtemp(prefix="openstack-vfs-localfs")
+ try:
+ if self.imgfmt == "raw":
+ LOG.debug(_("Using LoopMount"))
+ mount = loop.LoopMount(self.imgfile,
+ self.imgdir,
+ self.partition)
+ else:
+ LOG.debug(_("Using NbdMount"))
+ mount = nbd.NbdMount(self.imgfile,
+ self.imgdir,
+ self.partition)
+ if not mount.do_mount():
+ raise exception.NovaException(mount.error)
+ self.mount = mount
+ except Exception, e:
+ LOG.debug(_("Failed to mount image %(ex)s)") %
+ {'ex': str(e)})
+ self.teardown()
+ raise e
+
+ def teardown(self):
+ try:
+ if self.mount:
+ self.mount.do_umount()
+ except Exception, e:
+ LOG.debug(_("Failed to unmount %(imgdir)s: %(ex)s") %
+ {'imgdir': self.imgdir, 'ex': str(e)})
+ try:
+ if self.imgdir:
+ os.rmdir(self.imgdir)
+ except Exception, e:
+ LOG.debug(_("Failed to remove %(imgdir)s: %(ex)s") %
+ {'imgdir': self.imgdir, 'ex': str(e)})
+ self.imgdir = None
+ self.mount = None
+
+ def make_path(self, path):
+ LOG.debug(_("Make directory path=%(path)s") % locals())
+ canonpath = self._canonical_path(path)
+ utils.execute('mkdir', '-p', canonpath, run_as_root=True)
+
+ def append_file(self, path, content):
+ LOG.debug(_("Append file path=%(path)s") % locals())
+ canonpath = self._canonical_path(path)
+
+ args = ["-a", canonpath]
+ kwargs = dict(process_input=content, run_as_root=True)
+
+ utils.execute('tee', *args, **kwargs)
+
+ def replace_file(self, path, content):
+ LOG.debug(_("Replace file path=%(path)s") % locals())
+ canonpath = self._canonical_path(path)
+
+ args = [canonpath]
+ kwargs = dict(process_input=content, run_as_root=True)
+
+ utils.execute('tee', *args, **kwargs)
+
+ def read_file(self, path):
+ LOG.debug(_("Read file path=%(path)s") % locals())
+ canonpath = self._canonical_path(path)
+
+ return utils.read_file_as_root(canonpath)
+
+ def has_file(self, path):
+ LOG.debug(_("Has file path=%(path)s") % locals())
+ canonpath = self._canonical_path(path)
+ exists, _err = utils.trycmd('readlink', '-e',
+ canonpath,
+ run_as_root=True)
+ return exists
+
+ def set_permissions(self, path, mode):
+ LOG.debug(_("Set permissions path=%(path)s mode=%(mode)o") % locals())
+ canonpath = self._canonical_path(path)
+ utils.execute('chmod', "%o" % mode, canonpath, run_as_root=True)
+
+ def set_ownership(self, path, user, group):
+ LOG.debug(_("Set permissions path=%(path)s "
+ "user=%(user)s group=%(group)s") % locals())
+ canonpath = self._canonical_path(path)
+ owner = None
+ cmd = "chown"
+ if group is not None and user is not None:
+ owner = user + ":" + group
+ elif user is not None:
+ owner = user
+ elif group is not None:
+ owner = group
+ cmd = "chgrp"
+
+ if owner is not None:
+ utils.execute(cmd, owner, canonpath, run_as_root=True)
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index cb960466f..a8f779e66 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -22,9 +22,30 @@ Driver base-classes:
types that support that contract
"""
-from nova import flags
-from nova.openstack.common import log as logging
+import sys
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova import utils
+
+driver_opts = [
+ cfg.StrOpt('compute_driver',
+ help='Driver to use for controlling virtualization. Options '
+ 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
+ 'fake.FakeDriver, baremetal.BareMetalDriver, '
+ 'vmwareapi.VMWareESXDriver'),
+ cfg.StrOpt('default_ephemeral_format',
+ default=None,
+ help='The default format an ephemeral_volume will be '
+ 'formatted with on creation.'),
+ cfg.BoolOpt('use_cow_images',
+ default=True,
+ help='Whether to use cow images'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
@@ -88,6 +109,7 @@ class ComputeDriver(object):
capabilities = {
"has_imagecache": False,
+ "supports_recreate": False,
}
def __init__(self, virtapi):
@@ -153,6 +175,13 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
+ def list_instance_uuids(self):
+ """
+ Return the UUIDS of all the instances known to the virtualization
+ layer, as a list.
+ """
+ raise NotImplementedError()
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
@@ -180,7 +209,8 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
@@ -192,6 +222,7 @@ class ComputeDriver(object):
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
+ :param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
@@ -228,7 +259,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -237,6 +268,11 @@ class ComputeDriver(object):
running VM"""
raise NotImplementedError()
+ def get_all_volume_usage(self, context, compute_host_bdms):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ raise NotImplementedError()
+
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
@@ -244,12 +280,12 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
- def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach the disk attached to the instance"""
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach the disk attached to the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -261,7 +297,7 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
- def snapshot(self, context, instance, image_id):
+ def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
@@ -286,13 +322,13 @@ class ComputeDriver(object):
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -302,32 +338,32 @@ class ComputeDriver(object):
raise NotImplementedError()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def resume(self, instance):
- """resume the specified instance"""
+ def resume(self, instance, network_info, block_device_info=None):
+ """resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -336,7 +372,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
@@ -344,27 +380,32 @@ class ComputeDriver(object):
raise NotImplementedError()
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
raise NotImplementedError()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
+ :param nodename:
+ node which the caller want to get resources from
+ a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, ctxt, instance_ref,
- block_device_info, network_info):
+ block_device_info, network_info,
+ migrate_data=None):
"""Prepare an instance for live migration
:param ctxt: security context
:param instance_ref: instance object that will be migrated
:param block_device_info: instance block device information
:param network_info: instance network information
+ :param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
@@ -520,7 +561,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -551,15 +592,15 @@ class ComputeDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -598,7 +639,7 @@ class ComputeDriver(object):
pass
def inject_network_info(self, instance, nw_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -613,11 +654,6 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def poll_rescued_instances(self, timeout):
- """Poll for rescued instances"""
- # TODO(Vek): Need to pass context in for access to auth_token
- raise NotImplementedError()
-
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
@@ -647,7 +683,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
@@ -691,11 +727,39 @@ class ComputeDriver(object):
raise NotImplementedError()
def legacy_nwinfo(self):
+ """True if the driver requires the legacy network_info format."""
+ # TODO(tr3buchet): update all subclasses and remove this method and
+ # related helpers.
+ raise NotImplementedError(self.legacy_nwinfo)
+
+ def macs_for_instance(self, instance):
+ """What MAC addresses must this instance have?
+
+ Some hypervisors (such as bare metal) cannot do freeform virtualisation
+ of MAC addresses. This method allows drivers to return a set of MAC
+ addresses that the instance is to have. allocate_for_instance will take
+ this into consideration when provisioning networking for the instance.
+
+ Mapping of MAC addresses to actual networks (or permitting them to be
+ freeform) is up to the network implementation layer. For instance,
+ with openflow switches, fixed MAC addresses can still be virtualised
+ onto any L2 domain, with arbitrary VLANs etc, but regular switches
+ require pre-configured MAC->network mappings that will match the
+ actual configuration.
+
+ Most hypervisors can use the default implementation which returns None.
+ Hypervisors with MAC limits should return a set of MAC addresses, which
+ will be supplied to the allocate_for_instance call by the compute
+ manager, and it is up to that call to ensure that all assigned network
+ details are compatible with the set of MAC addresses.
+
+ This is called during spawn_instance by the compute manager.
+
+ :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
+ None means 'no constraints', a set means 'these and only these
+ MAC addresses'.
"""
- Indicate if the driver requires the legacy network_info format.
- """
- # TODO(tr3buchet): update all subclasses and remove this
- return True
+ return None
def manage_image_cache(self, context, all_instances):
"""
@@ -706,6 +770,7 @@ class ComputeDriver(object):
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
+ pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
@@ -716,9 +781,9 @@ class ComputeDriver(object):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
- def undo_aggregate_operation(self, context, op, aggregate_id,
+ def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo for Resource Pools"""
+ """Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
@@ -735,3 +800,73 @@ class ComputeDriver(object):
}
"""
raise NotImplementedError()
+
+ def get_available_nodes(self):
+ """Returns nodenames of all nodes managed by the compute service.
+
+ This method is for multi compute-nodes support. If a driver supports
+ multi compute-nodes, this method returns a list of nodenames managed
+ by the service. Otherwise, this method should return
+ [hypervisor_hostname].
+ """
+ stats = self.get_host_stats(refresh=True)
+ if not isinstance(stats, list):
+ stats = [stats]
+ return [s['hypervisor_hostname'] for s in stats]
+
+ def get_per_instance_usage(self):
+ """Get information about instance resource usage.
+
+ :returns: dict of nova uuid => dict of usage info
+ """
+ return {}
+
+ def instance_on_disk(self, instance):
+ """Checks access of instance files on the host.
+
+ :param instance: instance to lookup
+
+ Returns True if files of an instance with the supplied ID accessible on
+ the host, False otherwise.
+
+ .. note::
+ Used in rebuild for HA implementation and required for validation
+ of access to instance shared disk files
+ """
+ return False
+
+
+def load_compute_driver(virtapi, compute_driver=None):
+ """Load a compute driver module.
+
+ Load the compute driver module specified by the compute_driver
+ configuration option or, if supplied, the driver name supplied as an
+ argument.
+
+ Compute drivers constructors take a VirtAPI object as their first object
+ and this must be supplied.
+
+ :param virtapi: a VirtAPI instance
+ :param compute_driver: a compute driver name to override the config opt
+ :returns: a ComputeDriver instance
+ """
+ if not compute_driver:
+ compute_driver = CONF.compute_driver
+
+ if not compute_driver:
+ LOG.error(_("Compute driver option required, but not specified"))
+ sys.exit(1)
+
+ LOG.info(_("Loading compute driver '%s'") % compute_driver)
+ try:
+ driver = importutils.import_object_ns('nova.virt',
+ compute_driver,
+ virtapi)
+ return utils.check_isinstance(driver, ComputeDriver)
+ except ImportError as e:
+ LOG.error(_("Unable to load the virtualization driver: %s") % (e))
+ sys.exit(1)
+
+
+def compute_driver_matches(match):
+ return CONF.compute_driver.endswith(match)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 79d98c5cf..0a29a6d67 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -26,6 +26,7 @@ semantics of real hypervisor connections.
"""
from nova.compute import power_state
+from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
@@ -36,6 +37,32 @@ from nova.virt import virtapi
LOG = logging.getLogger(__name__)
+_FAKE_NODES = ['fake-mini']
+
+
+def set_nodes(nodes):
+ """Sets FakeDriver's node.list.
+
+ It has effect on the following methods:
+ get_available_nodes()
+ get_available_resource
+ get_host_stats()
+
+ To restore the change, call restore_nodes()
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = nodes
+
+
+def restore_nodes():
+ """Resets FakeDriver's node list modified by set_nodes().
+
+ Usually called from tearDown().
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = ['fake-mini']
+
+
class FakeInstance(object):
def __init__(self, name, state):
@@ -49,14 +76,15 @@ class FakeInstance(object):
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
- """Fake hypervisor driver"""
+ """Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
- self.host_status = {
+ self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
@@ -96,9 +124,10 @@ class FakeDriver(driver.ComputeDriver):
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
if not instance['name'] in self.instances:
- raise exception.InstanceNotRunning()
+ raise exception.InstanceNotRunning(instance_id=instance['uuid'])
+ update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
@@ -128,9 +157,6 @@ class FakeDriver(driver.ComputeDriver):
def poll_rebooting_instances(self, timeout, instances):
pass
- def poll_rescued_instances(self, timeout):
- pass
-
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
@@ -161,28 +187,31 @@ class FakeDriver(driver.ComputeDriver):
def suspend(self, instance):
pass
- def resume(self, instance):
+ def resume(self, instance, network_info, block_device_info=None):
pass
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
key = instance['name']
if key in self.instances:
del self.instances[key]
else:
- LOG.warning("Key '%s' not in instances '%s'" %
- (key, self.instances), instance=instance)
+ LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
+ {'key': key,
+ 'inst': self.instances}, instance=instance)
- def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach the disk to the instance at mountpoint using info."""
+ instance_name = instance['name']
if not instance_name in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
- def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach the disk attached to the instance"""
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach the disk attached to the instance."""
try:
- del self._mounts[instance_name][mountpoint]
+ del self._mounts[instance['name']][mountpoint]
except KeyError:
pass
return True
@@ -221,6 +250,13 @@ class FakeDriver(driver.ComputeDriver):
bw = []
return bw
+ def get_all_volume_usage(self, context, instances, start_time,
+ stop_time=None):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ volusage = []
+ return volusage
+
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
@@ -252,12 +288,14 @@ class FakeDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
+ if nodename not in _FAKE_NODES:
+ raise exception.NovaException("node %s is not found" % nodename)
dic = {'vcpus': 1,
'memory_mb': 8192,
@@ -267,7 +305,7 @@ class FakeDriver(driver.ComputeDriver):
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
- 'hypervisor_hostname': 'fake-mini',
+ 'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
@@ -291,7 +329,7 @@ class FakeDriver(driver.ComputeDriver):
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
- return
+ return {}
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
@@ -306,7 +344,7 @@ class FakeDriver(driver.ComputeDriver):
return
def pre_live_migration(self, context, instance_ref, block_device_info,
- network_info):
+ network_info, migrate_data=None):
return
def unfilter_instance(self, instance_ref, network_info):
@@ -314,33 +352,60 @@ class FakeDriver(driver.ComputeDriver):
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
- """ Removes the named VM, as if it crashed. For testing"""
+ """Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
- return self.host_status
+ stats = []
+ for nodename in _FAKE_NODES:
+ host_status = self.host_status_base.copy()
+ host_status['hypervisor_hostname'] = nodename
+ host_status['host_hostname'] = nodename
+ host_status['host_name_label'] = nodename
+ stats.append(host_status)
+ if len(stats) == 0:
+ raise exception.NovaException("FakeDriver has no node")
+ elif len(stats) == 1:
+ return stats[0]
+ else:
+ return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
- pass
+ return action
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
- pass
+ if not mode:
+ return 'off_maintenance'
+ return 'on_maintenance'
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
- pass
+ if enabled:
+ return 'enabled'
+ return 'disabled'
def get_disk_available_least(self):
- """ """
pass
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
+ def get_available_nodes(self):
+ return _FAKE_NODES
+
+ def instance_on_disk(self, instance):
+ return False
+
+ def list_instance_uuids(self):
+ return []
+
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
@@ -353,3 +418,29 @@ class FakeVirtAPI(virtapi.VirtAPI):
def instance_get_all_by_host(self, context, host):
return db.instance_get_all_by_host(context, host)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return db.aggregate_get_by_host(context, host, key=key)
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ return db.aggregate_metadata_add(context, aggregate['id'], metadata,
+ set_delete=set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ return db.aggregate_metadata_delete(context, aggregate['id'], key)
+
+ def security_group_get_by_instance(self, context, instance):
+ return db.security_group_get_by_instance(context, instance['id'])
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group):
+ return db.security_group_rule_get_by_security_group(
+ context, security_group['id'])
+
+ def provider_fw_rule_get_all(self, context):
+ return db.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return db.agent_build_get_by_triple(context,
+ hypervisor, os, architecture)
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index ff464e8e3..bbc6034bd 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -17,10 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import config
from nova import context
-from nova import db
-from nova import flags
from nova import network
from nova.network import linux_net
from nova.openstack.common import cfg
@@ -42,8 +39,9 @@ firewall_opts = [
help='Whether to allow network traffic from same network'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(firewall_opts)
+CONF.import_opt('use_ipv6', 'nova.netconf')
def load_driver(default, *args, **kwargs):
@@ -52,26 +50,29 @@ def load_driver(default, *args, **kwargs):
class FirewallDriver(object):
- """ Firewall Driver base class.
+ """Firewall Driver base class.
Defines methods that any driver providing security groups
- and provider fireall functionality should implement.
+ and provider firewall functionality should implement.
"""
+ def __init__(self, virtapi):
+ self._virtapi = virtapi
+
def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
At this point, the instance isn't running yet."""
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
@@ -125,7 +126,7 @@ class FirewallDriver(object):
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
def _handle_network_info_model(self, network_info):
@@ -140,7 +141,8 @@ class FirewallDriver(object):
class IptablesFirewallDriver(FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
- def __init__(self, **kwargs):
+ def __init__(self, virtapi, **kwargs):
+ super(IptablesFirewallDriver, self).__init__(virtapi)
self.iptables = linux_net.iptables_manager
self.instances = {}
self.network_infos = {}
@@ -295,8 +297,8 @@ class IptablesFirewallDriver(FirewallDriver):
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
def _build_icmp_rule(self, rule, version):
- icmp_type = rule.from_port
- icmp_code = rule.to_port
+ icmp_type = rule['from_port']
+ icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
@@ -314,12 +316,12 @@ class IptablesFirewallDriver(FirewallDriver):
return []
def _build_tcp_udp_rule(self, rule, version):
- if rule.from_port == rule.to_port:
- return ['--dport', '%s' % (rule.from_port,)]
+ if rule['from_port'] == rule['to_port']:
+ return ['--dport', '%s' % (rule['from_port'],)]
else:
return ['-m', 'multiport',
- '--dports', '%s:%s' % (rule.from_port,
- rule.to_port)]
+ '--dports', '%s:%s' % (rule['from_port'],
+ rule['to_port'])]
def instance_rules(self, instance, network_info):
# make sure this is legacy nw_info
@@ -346,32 +348,32 @@ class IptablesFirewallDriver(FirewallDriver):
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
- security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
+ security_groups = self._virtapi.security_group_get_by_instance(
+ ctxt, instance)
# then, security group chains and rules
for security_group in security_groups:
- rules = db.security_group_rule_get_by_security_group(ctxt,
- security_group['id'])
+ rules = self._virtapi.security_group_rule_get_by_security_group(
+ ctxt, security_group)
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
instance=instance)
- if not rule.cidr:
+ if not rule['cidr']:
version = 4
else:
- version = netutils.get_ip_version(rule.cidr)
+ version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
- protocol = rule.protocol
+ protocol = rule['protocol']
if protocol:
- protocol = rule.protocol.lower()
+ protocol = rule['protocol'].lower()
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
@@ -384,9 +386,9 @@ class IptablesFirewallDriver(FirewallDriver):
args += self._build_tcp_udp_rule(rule, version)
elif protocol == 'icmp':
args += self._build_icmp_rule(rule, version)
- if rule.cidr:
- LOG.debug('Using cidr %r', rule.cidr, instance=instance)
- args += ['-s', rule.cidr]
+ if rule['cidr']:
+ LOG.debug('Using cidr %r', rule['cidr'], instance=instance)
+ args += ['-s', rule['cidr']]
fw_rules += [' '.join(args)]
else:
if rule['grantee_group']:
@@ -479,13 +481,12 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
- @staticmethod
- def _provider_rules():
+ def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6."""
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
- rules = db.provider_fw_rule_get_all(ctxt)
+ rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
diff --git a/nova/virt/hyperv/baseops.py b/nova/virt/hyperv/baseops.py
index 3d941a854..5b617f898 100644
--- a/nova/virt/hyperv/baseops.py
+++ b/nova/virt/hyperv/baseops.py
@@ -35,6 +35,7 @@ class BaseOps(object):
self.__conn_v2 = None
self.__conn_cimv2 = None
self.__conn_wmi = None
+ self.__conn_storage = None
@property
def _conn(self):
@@ -59,3 +60,10 @@ class BaseOps(object):
if self.__conn_wmi is None:
self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
return self.__conn_wmi
+
+ @property
+ def _conn_storage(self):
+ if self.__conn_storage is None:
+ storage_namespace = '//./Root/Microsoft/Windows/Storage'
+ self.__conn_storage = wmi.WMI(moniker=storage_namespace)
+ return self.__conn_storage
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
new file mode 100644
index 000000000..2352c3bef
--- /dev/null
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import sys
+
+from nova import block_device
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt import driver
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import _winreg
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+class BaseVolumeUtils(object):
+
+ def get_iscsi_initiator(self, cim_conn):
+ """Get iscsi initiator name for this machine."""
+
+ computer_system = cim_conn.Win32_ComputerSystem()[0]
+ hostname = computer_system.name
+ keypath = \
+ r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
+ try:
+ key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
+ _winreg.KEY_ALL_ACCESS)
+ temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
+ initiator_name = str(temp[0])
+ _winreg.CloseKey(key)
+ except Exception:
+ LOG.info(_("The ISCSI initiator name can't be found. "
+ "Choosing the default one"))
+ computer_system = cim_conn.Win32_ComputerSystem()[0]
+ initiator_name = "iqn.1991-05.com.microsoft:" + \
+ hostname.lower()
+ return {
+ 'ip': CONF.my_ip,
+ 'initiator': initiator_name,
+ }
+
+ def volume_in_mapping(self, mount_device, block_device_info):
+ block_device_list = [block_device.strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(
+ block_device.strip_dev(swap['device_name']))
+ block_device_list += [block_device.strip_dev(
+ ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_dev(mount_device) in block_device_list
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
index 392dcfa13..4be569e88 100644
--- a/nova/virt/hyperv/constants.py
+++ b/nova/virt/hyperv/constants.py
@@ -44,6 +44,30 @@ REQ_POWER_STATE = {
'Suspended': HYPERV_VM_STATE_SUSPENDED,
}
+WMI_WIN32_PROCESSOR_ARCHITECTURE = {
+ 0: 'x86',
+ 1: 'MIPS',
+ 2: 'Alpha',
+ 3: 'PowerPC',
+ 5: 'ARM',
+ 6: 'Itanium-based systems',
+ 9: 'x64',
+}
+
+PROCESSOR_FEATURE = {
+ 7: '3dnow',
+ 3: 'mmx',
+ 12: 'nx',
+ 9: 'pae',
+ 8: 'rdtsc',
+ 20: 'slat',
+ 13: 'sse3',
+ 21: 'vmx',
+ 6: 'sse',
+ 10: 'sse2',
+ 17: 'xsave',
+}
+
WMI_JOB_STATUS_STARTED = 4096
WMI_JOB_STATE_RUNNING = 4
WMI_JOB_STATE_COMPLETED = 7
@@ -52,3 +76,6 @@ VM_SUMMARY_NUM_PROCS = 4
VM_SUMMARY_ENABLED_STATE = 100
VM_SUMMARY_MEMORY_USAGE = 103
VM_SUMMARY_UPTIME = 105
+
+IDE_DISK = "VHD"
+IDE_DVD = "DVD"
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 6d9f66ff8..799ef7172 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -91,38 +91,36 @@ class HyperVDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- self._vmops.spawn(context, instance, image_meta, network_info,
- block_device_info)
+ self._vmops.spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
self._vmops.reboot(instance, network_info, reboot_type)
- def destroy(self, instance, network_info=None, cleanup=True):
- self._vmops.destroy(instance, network_info, cleanup)
+ def destroy(self, instance, network_info=None, cleanup=True,
+ destroy_disks=True):
+ self._vmops.destroy(instance, network_info, cleanup, destroy_disks)
def get_info(self, instance):
return self._vmops.get_info(instance)
- def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach volume storage to VM instance"""
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
- instance_name,
+ instance['name'],
mountpoint)
- def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
- instance_name,
+ instance['name'],
mountpoint)
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
- def poll_rescued_instances(self, timeout):
- pass
-
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_host_stats(self, refresh=False):
@@ -131,12 +129,8 @@ class HyperVDriver(driver.ComputeDriver):
def host_power_action(self, host, action):
return self._hostops.host_power_action(host, action)
- def set_host_enabled(self, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- pass
-
- def snapshot(self, context, instance, name):
- self._snapshotops.snapshot(context, instance, name)
+ def snapshot(self, context, instance, name, update_task_state):
+ self._snapshotops.snapshot(context, instance, name, update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
@@ -147,7 +141,7 @@ class HyperVDriver(driver.ComputeDriver):
def suspend(self, instance):
self._vmops.suspend(instance)
- def resume(self, instance):
+ def resume(self, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance):
@@ -165,7 +159,7 @@ class HyperVDriver(driver.ComputeDriver):
return self._livemigrationops.compare_cpu(cpu_info)
def pre_live_migration(self, context, instance, block_device_info,
- network_info):
+ network_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info, network_info)
@@ -197,22 +191,22 @@ class HyperVDriver(driver.ComputeDriver):
instance=instance_ref)
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
LOG.debug(_("unfilter_instance called"), instance=instance)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
LOG.debug(_("confirm_migration called"), instance=instance)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
LOG.debug(_("finish_revert_migration called"), instance=instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
LOG.debug(_("finish_migration called"), instance=instance)
def get_console_output(self, instance):
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index a2f9d5904..5cbe46c1c 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -18,12 +18,18 @@
"""
Management class for host operations.
"""
+import ctypes
import multiprocessing
+import os
import platform
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -32,6 +38,35 @@ class HostOps(baseops.BaseOps):
super(HostOps, self).__init__()
self._stats = None
+ def _get_cpu_info(self):
+ """Get the CPU information.
+ :returns: A dictionary containing the main properties
+ of the central processor in the hypervisor.
+ """
+ cpu_info = dict()
+ processor = self._conn_cimv2.query(
+ "SELECT * FROM Win32_Processor WHERE ProcessorType = 3")
+
+ cpu_info['arch'] = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE\
+ .get(processor[0].Architecture, 'Unknown')
+ cpu_info['model'] = processor[0].Name
+ cpu_info['vendor'] = processor[0].Manufacturer
+
+ topology = dict()
+ topology['sockets'] = len(processor)
+ topology['cores'] = processor[0].NumberOfCores
+ topology['threads'] = processor[0].NumberOfLogicalProcessors\
+ / processor[0].NumberOfCores
+ cpu_info['topology'] = topology
+
+ features = list()
+ for fkey, fname in constants.PROCESSOR_FEATURE.items():
+ if ctypes.windll.kernel32.IsProcessorFeaturePresent(fkey):
+ features.append(fname)
+ cpu_info['features'] = features
+
+ return jsonutils.dumps(cpu_info)
+
def _get_vcpu_total(self):
"""Get vcpu number of physical computer.
:returns: the number of cpu core.
@@ -55,22 +90,24 @@ class HostOps(baseops.BaseOps):
total_mb = long(total_kb) / 1024
return total_mb
- def _get_local_gb_total(self):
- """Get the total hdd size(GB) of physical computer.
+ def _get_local_hdd_info_gb(self):
+ """Get the total and used size of the volume containing
+ CONF.instances_path expressed in GB.
:returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
+ A tuple with the total and used space in GB.
"""
- #TODO(jordanrinke): This binds to C only right now,
- #need to bind to instance dir
- total_kb = self._conn_cimv2.query(
- "SELECT Size FROM win32_logicaldisk WHERE DriveType=3")[0].Size
- total_gb = long(total_kb) / (1024 ** 3)
- return total_gb
+ normalized_path = os.path.normpath(CONF.instances_path)
+ drive, path = os.path.splitdrive(normalized_path)
+ hdd_info = self._conn_cimv2.query(
+ ("SELECT FreeSpace,Size FROM win32_logicaldisk WHERE DeviceID='%s'"
+ ) % drive)[0]
+ total_gb = long(hdd_info.Size) / (1024 ** 3)
+ free_gb = long(hdd_info.FreeSpace) / (1024 ** 3)
+ used_gb = total_gb - free_gb
+ return total_gb, used_gb
def _get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
+ """Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
#TODO(jordanrinke) figure out a way to count assigned VCPUs
@@ -88,21 +125,6 @@ class HostOps(baseops.BaseOps):
return total_mb
- def _get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
- """
- #TODO(jordanrinke): This binds to C only right now,
- #need to bind to instance dir
- total_kb = self._conn_cimv2.query(
- "SELECT FreeSpace FROM win32_logicaldisk WHERE DriveType=3")[0]\
- .FreeSpace
- total_gb = long(total_kb) / (1024 ** 3)
- return total_gb
-
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
@@ -123,17 +145,17 @@ class HostOps(baseops.BaseOps):
"""
LOG.info(_('get_available_resource called'))
- # TODO(alexpilotti) implemented cpu_info
+ local_gb, used_gb = self._get_local_hdd_info_gb()
dic = {'vcpus': self._get_vcpu_total(),
'memory_mb': self._get_memory_mb_total(),
- 'local_gb': self._get_local_gb_total(),
+ 'local_gb': local_gb,
'vcpus_used': self._get_vcpu_used(),
'memory_mb_used': self._get_memory_mb_used(),
- 'local_gb_used': self._get_local_gb_used(),
+ 'local_gb_used': used_gb,
'hypervisor_type': "hyperv",
'hypervisor_version': self._get_hypervisor_version(),
'hypervisor_hostname': platform.node(),
- 'cpu_info': 'unknown'}
+ 'cpu_info': self._get_cpu_info()}
return dic
@@ -141,8 +163,7 @@ class HostOps(baseops.BaseOps):
LOG.debug(_("Updating host stats"))
data = {}
- data["disk_total"] = self._get_local_gb_total()
- data["disk_used"] = self._get_local_gb_used()
+ data["disk_total"], data["disk_used"] = self._get_local_hdd_info_gb()
data["disk_available"] = data["disk_total"] - data["disk_used"]
data["host_memory_total"] = self._get_memory_mb_total()
data["host_memory_overhead"] = self._get_memory_mb_used()
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 1bd6ab05f..232cbd660 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -21,9 +21,8 @@ Management class for live migration VM operations.
import os
import sys
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
@@ -35,7 +34,8 @@ if sys.platform == 'win32':
import wmi
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('use_cow_images', 'nova.virt.driver')
class LiveMigrationOps(baseops.BaseOps):
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index 65b123932..cdc6e45a4 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -22,10 +22,10 @@ import os
import shutil
import sys
-from nova import config
+from nova.compute import task_states
from nova import exception
-from nova import flags
from nova.image import glance
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
@@ -37,7 +37,7 @@ from xml.etree import ElementTree
if sys.platform == 'win32':
import wmi
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -46,7 +46,7 @@ class SnapshotOps(baseops.BaseOps):
super(SnapshotOps, self).__init__()
self._vmutils = vmutils.VMUtils()
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
vm = self._vmutils.lookup(self._conn, instance_name)
@@ -71,6 +71,8 @@ class SnapshotOps(baseops.BaseOps):
raise vmutils.HyperVException(
_('Failed to create snapshot for VM %s') %
instance_name)
+ else:
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_folder = None
f = None
@@ -165,6 +167,8 @@ class SnapshotOps(baseops.BaseOps):
_("Updating Glance image %(image_id)s with content from "
"merged disk %(image_vhd_path)s"),
locals())
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
glance_image_service.update(context, image_id, image_metadata, f)
LOG.debug(_("Snapshot image %(image_id)s updated for VM "
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
new file mode 100644
index 000000000..a898d3ac2
--- /dev/null
+++ b/nova/virt/hyperv/vif.py
@@ -0,0 +1,133 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Cloudbase Solutions Srl
+# Copyright 2013 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+import uuid
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+from abc import abstractmethod
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import vmutils
+
+hyperv_opts = [
+ cfg.StrOpt('vswitch_name',
+ default=None,
+ help='External virtual switch Name, '
+ 'if not provided, the first external virtual '
+ 'switch is used'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(hyperv_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVBaseVIFDriver(object):
+ @abstractmethod
+ def plug(self, instance, vif):
+ pass
+
+ @abstractmethod
+ def unplug(self, instance, vif):
+ pass
+
+
+class HyperVQuantumVIFDriver(HyperVBaseVIFDriver):
+ """Quantum VIF driver."""
+
+ def plug(self, instance, vif):
+ # Quantum takes care of plugging the port
+ pass
+
+ def unplug(self, instance, vif):
+ # Quantum takes care of unplugging the port
+ pass
+
+
+class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
+ """Nova network VIF driver."""
+
+ def __init__(self):
+ self._vmutils = vmutils.VMUtils()
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+
+    def _find_external_network(self):
+        """Find the vswitch that is connected to the physical NIC.
+           Assumes only one physical NIC on the host.
+        """
+        #If there are no physical NICs connected to networks, return None.
+        LOG.debug(_("Attempting to bind NIC to %s ")
+                  % CONF.vswitch_name)
+        if CONF.vswitch_name:
+            LOG.debug(_("Attempting to bind NIC to %s ")
+                      % CONF.vswitch_name)
+            bound = self._conn.Msvm_VirtualSwitch(
+                ElementName=CONF.vswitch_name)
+        else:
+            LOG.debug(_("No vSwitch specified, attaching to default"))
+            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
+        if len(bound) == 0:
+            return None
+        if CONF.vswitch_name:
+            return self._conn.Msvm_VirtualSwitch(
+                ElementName=CONF.vswitch_name)[0]\
+                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+        else:
+            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
+                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+
+ def plug(self, instance, vif):
+ extswitch = self._find_external_network()
+ if extswitch is None:
+ raise vmutils.HyperVException(_('Cannot find vSwitch'))
+
+ vm_name = instance['name']
+
+ nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData(
+ ElementName=vif['id'])[0]
+
+ switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+ #Create a port on the vswitch.
+ (new_port, ret_val) = switch_svc.CreateSwitchPort(
+ Name=str(uuid.uuid4()),
+ FriendlyName=vm_name,
+ ScopeOfResidence="",
+ VirtualSwitch=extswitch.path_())
+ if ret_val != 0:
+ LOG.error(_('Failed creating a port on the external vswitch'))
+ raise vmutils.HyperVException(_('Failed creating port for %s') %
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
+
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
+ nic_data.Connection = [new_port]
+ self._vmutils.modify_virt_resource(self._conn, nic_data, vm)
+
+ def unplug(self, instance, vif):
+ #TODO(alepilotti) Not implemented
+ pass
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index d252fbfb3..3d8958266 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -21,13 +21,14 @@ Management class for basic VM operations.
import os
import uuid
-from nova import config
-from nova import db
+from nova.api.metadata import base as instance_metadata
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt import configdrive
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
@@ -35,39 +36,64 @@ from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
- cfg.StrOpt('vswitch_name',
- default=None,
- help='Default vSwitch Name, '
- 'if none provided first external is used'),
cfg.BoolOpt('limit_cpu_features',
default=False,
- help='required for live migration among '
- 'hosts with different CPU features')
+ help='Required for live migration among '
+ 'hosts with different CPU features'),
+ cfg.BoolOpt('config_drive_inject_password',
+ default=False,
+ help='Sets the admin password in the config drive image'),
+ cfg.StrOpt('qemu_img_cmd',
+ default="qemu-img.exe",
+ help='qemu-img is used to convert between '
+ 'different image types'),
+ cfg.BoolOpt('config_drive_cdrom',
+ default=False,
+ help='Attaches the Config Drive image as a cdrom drive '
+ 'instead of a disk drive')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
+CONF.import_opt('use_cow_images', 'nova.virt.driver')
+CONF.import_opt('network_api_class', 'nova.network')
class VMOps(baseops.BaseOps):
+ _vif_driver_class_map = {
+ 'nova.network.quantumv2.api.API':
+ 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver',
+ 'nova.network.api.API':
+ 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
+ }
+
def __init__(self, volumeops):
super(VMOps, self).__init__()
self._vmutils = vmutils.VMUtils()
self._volumeops = volumeops
+ self._load_vif_driver_class()
+
+ def _load_vif_driver_class(self):
+ try:
+ class_name = self._vif_driver_class_map[CONF.network_api_class]
+ self._vif_driver = importutils.import_object(class_name)
+ except KeyError:
+ raise TypeError(_("VIF driver not found for "
+ "network_api_class: %s") %
+ CONF.network_api_class)
def list_instances(self):
- """ Return the names of all the instances known to Hyper-V. """
+ """Return the names of all the instances known to Hyper-V."""
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
- """Get information about the VM"""
+ """Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
- instance_name = instance["name"]
- return self._get_info(instance_name)
+ return self._get_info(instance['name'])
def _get_info(self, instance_name):
vm = self._vmutils.lookup(self._conn, instance_name)
@@ -90,12 +116,12 @@ class VMOps(baseops.BaseOps):
info = summary_info[0]
LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
- state = str(constants.HYPERV_POWER_STATE[info.EnabledState])
+ state = constants.HYPERV_POWER_STATE[info.EnabledState]
memusage = str(info.MemoryUsage)
numprocs = str(info.NumberOfProcessors)
uptime = str(info.UpTime)
- LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)s,"
+ LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d,"
" mem=%(memusage)s, num_cpu=%(numprocs)s,"
" uptime=%(uptime)s"), locals())
@@ -105,13 +131,12 @@ class VMOps(baseops.BaseOps):
'num_cpu': info.NumberOfProcessors,
'cpu_time': info.UpTime}
- def spawn(self, context, instance, image_meta, network_info,
- block_device_info=None):
- """ Create a new VM and start it."""
- instance_name = instance["name"]
- vm = self._vmutils.lookup(self._conn, instance_name)
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info=None):
+ """Create a new VM and start it."""
+ vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is not None:
- raise exception.InstanceExists(name=instance_name)
+ raise exception.InstanceExists(name=instance['name'])
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
@@ -120,7 +145,7 @@ class VMOps(baseops.BaseOps):
#If is not a boot from volume spawn
if not (ebs_root):
#Fetch the file, assume it is a VHD file.
- vhdfile = self._vmutils.get_vhd_path(instance_name)
+ vhdfile = self._vmutils.get_vhd_path(instance['name'])
try:
self._cache_image(fn=self._vmutils.fetch_image,
context=context,
@@ -138,33 +163,85 @@ class VMOps(baseops.BaseOps):
self._create_vm(instance)
if not ebs_root:
- self._create_disk(instance['name'], vhdfile)
+ self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
+ constants.IDE_DISK)
else:
self._volumeops.attach_boot_volume(block_device_info,
- instance_name)
+ instance['name'])
#A SCSI controller for volumes connection is created
self._create_scsi_controller(instance['name'])
for vif in network_info:
- mac_address = vif['address'].replace(':', '')
- self._create_nic(instance['name'], mac_address)
+ self._create_nic(instance['name'], vif)
+ self._vif_driver.plug(instance, vif)
- LOG.debug(_('Starting VM %s '), instance_name)
+ if configdrive.required_by(instance):
+ self._create_config_drive(instance, injected_files,
+ admin_password)
+
+ LOG.debug(_('Starting VM %s '), instance['name'])
self._set_vm_state(instance['name'], 'Enabled')
- LOG.info(_('Started VM %s '), instance_name)
+ LOG.info(_('Started VM %s '), instance['name'])
except Exception as exn:
LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
- raise
+ raise exn
+
+ def _create_config_drive(self, instance, injected_files, admin_password):
+ if CONF.config_drive_format != 'iso9660':
+ vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
+ CONF.config_drive_format)
+
+ LOG.info(_('Using config drive'), instance=instance)
+ extra_md = {}
+ if admin_password and CONF.config_drive_inject_password:
+ extra_md['admin_pass'] = admin_password
+
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=injected_files, extra_md=extra_md)
+
+ instance_path = self._vmutils.get_instance_path(
+ instance['name'])
+ configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
+ LOG.info(_('Creating config drive at %(path)s'),
+ {'path': configdrive_path_iso}, instance=instance)
+
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
+ try:
+ cdb.make_drive(configdrive_path_iso)
+ except exception.ProcessExecutionError, e:
+ LOG.error(_('Creating config drive failed with error: %s'),
+ e, instance=instance)
+ raise
+
+ if not CONF.config_drive_cdrom:
+ drive_type = constants.IDE_DISK
+ configdrive_path = os.path.join(instance_path,
+ 'configdrive.vhd')
+ utils.execute(CONF.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ configdrive_path_iso,
+ configdrive_path,
+ attempts=1)
+ os.remove(configdrive_path_iso)
+ else:
+ drive_type = constants.IDE_DVD
+ configdrive_path = configdrive_path_iso
+
+ self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
+ drive_type)
def _create_vm(self, instance):
- """Create a VM but don't start it. """
- instance_name = instance["name"]
+ """Create a VM but don't start it."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
- vs_gs_data.ElementName = instance_name
+ vs_gs_data.ElementName = instance["name"]
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
[], None, vs_gs_data.GetText_(1))[1:]
if ret_val == constants.WMI_JOB_STATUS_STARTED:
@@ -174,10 +251,10 @@ class VMOps(baseops.BaseOps):
if not success:
raise vmutils.HyperVException(_('Failed to create VM %s') %
- instance_name)
+ instance["name"])
- LOG.debug(_('Created VM %s...'), instance_name)
- vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
+ LOG.debug(_('Created VM %s...'), instance["name"])
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
@@ -193,7 +270,7 @@ class VMOps(baseops.BaseOps):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
- LOG.debug(_('Set memory for vm %s...'), instance_name)
+ LOG.debug(_('Set memory for vm %s...'), instance["name"])
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
@@ -206,10 +283,10 @@ class VMOps(baseops.BaseOps):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
- LOG.debug(_('Set vcpus for vm %s...'), instance_name)
+ LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
- """ Create an iscsi controller ready to mount volumes """
+ """Create an iscsi controller ready to mount volumes."""
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
@@ -230,101 +307,103 @@ class VMOps(baseops.BaseOps):
_('Failed to add scsi controller to VM %s') %
vm_name)
- def _create_disk(self, vm_name, vhdfile):
- """Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %(vm_name)s by attaching'
- ' disk file %(vhdfile)s') % locals())
+ def _get_ide_controller(self, vm, ctrller_addr):
#Find the IDE controller for the vm.
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
- and r.Address == "0"]
+ and r.Address == str(ctrller_addr)]
+ return ctrller
+
+ def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
+ drive_type=constants.IDE_DISK):
+ """Create an IDE drive and attach it to the vm."""
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(path)s') % locals())
+
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
+ ctrller = self._get_ide_controller(vm, ctrller_addr)
+
+ if drive_type == constants.IDE_DISK:
+ resSubType = 'Microsoft Synthetic Disk Drive'
+ elif drive_type == constants.IDE_DVD:
+ resSubType = 'Microsoft Synthetic DVD Drive'
+
#Find the default disk drive object for the vm and clone it.
- diskdflt = self._conn.query(
+ drivedflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
- diskdrive = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ WHERE ResourceSubType LIKE '%(resSubType)s'\
+ AND InstanceID LIKE '%%Default%%'" % locals())[0]
+ drive = self._vmutils.clone_wmi_obj(self._conn,
+ 'Msvm_ResourceAllocationSettingData', drivedflt)
#Set the IDE ctrller as parent.
- diskdrive.Parent = ctrller[0].path_()
- diskdrive.Address = 0
+ drive.Parent = ctrller[0].path_()
+ drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
- diskdrive, vm)
+ drive, vm)
if new_resources is None:
raise vmutils.HyperVException(
- _('Failed to add diskdrive to VM %s') %
+ _('Failed to add drive to VM %s') %
vm_name)
- diskdrive_path = new_resources[0]
- LOG.debug(_('New disk drive path is %s'), diskdrive_path)
+ drive_path = new_resources[0]
+ LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
+ locals())
+
+ if drive_type == constants.IDE_DISK:
+ resSubType = 'Microsoft Virtual Hard Disk'
+ elif drive_type == constants.IDE_DVD:
+ resSubType = 'Microsoft Virtual CD/DVD Disk'
+
#Find the default VHD disk object.
- vhddefault = self._conn.query(
+ drivedefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
- InstanceID LIKE '%Default%' ")[0]
+ WHERE ResourceSubType LIKE '%(resSubType)s' AND \
+ InstanceID LIKE '%%Default%%' " % locals())[0]
#Clone the default and point it to the image file.
- vhddisk = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', vhddefault)
+ res = self._vmutils.clone_wmi_obj(self._conn,
+ 'Msvm_ResourceAllocationSettingData', drivedefault)
#Set the new drive as the parent.
- vhddisk.Parent = diskdrive_path
- vhddisk.Connection = [vhdfile]
+ res.Parent = drive_path
+ res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
- new_resources = self._vmutils.add_virt_resource(self._conn,
- vhddisk, vm)
+ new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
if new_resources is None:
raise vmutils.HyperVException(
- _('Failed to add vhd file to VM %s') %
- vm_name)
- LOG.info(_('Created disk for %s'), vm_name)
+ _('Failed to add %(drive_type)s image to VM %(vm_name)s') %
+ locals())
+ LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
+ locals())
- def _create_nic(self, vm_name, mac):
- """Create a (synthetic) nic and attach it to the vm"""
+ def _create_nic(self, vm_name, vif):
+ """Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
- #Find the vswitch that is connected to the physical nic.
- vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
- extswitch = self._find_external_network()
- if extswitch is None:
- raise vmutils.HyperVException(_('Cannot find vSwitch'))
- vm = vms[0]
- switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
- #Find the default nic and clone it to create a new nic for the vm.
- #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
- #Linux Integration Components installed.
+ #Create a new nic
syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
default_nic_data = [n for n in syntheticnics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_SyntheticEthernetPortSettingData',
default_nic_data[0])
- #Create a port on the vswitch.
- (new_port, ret_val) = switch_svc.CreateSwitchPort(
- Name=str(uuid.uuid4()),
- FriendlyName=vm_name,
- ScopeOfResidence="",
- VirtualSwitch=extswitch.path_())
- if ret_val != 0:
- LOG.error(_('Failed creating a port on the external vswitch'))
- raise vmutils.HyperVException(_('Failed creating port for %s') %
- vm_name)
- ext_path = extswitch.path_()
- LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
- % locals())
- #Connect the new nic to the new port.
- new_nic_data.Connection = [new_port]
- new_nic_data.ElementName = vm_name + ' nic'
- new_nic_data.Address = mac
+
+ #Configure the nic
+ new_nic_data.ElementName = vif['id']
+ new_nic_data.Address = vif['address'].replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- #Add the new nic to the vm.
+
+ #Add the new nic to the vm
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
new_resources = self._vmutils.add_virt_resource(self._conn,
new_nic_data, vm)
if new_resources is None:
@@ -332,52 +411,24 @@ class VMOps(baseops.BaseOps):
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
- def _find_external_network(self):
- """Find the vswitch that is connected to the physical nic.
- Assumes only one physical nic on the host
- """
- #If there are no physical nics connected to networks, return.
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- if CONF.vswitch_name:
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- bound = self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)
- else:
- LOG.debug(_("No vSwitch specified, attaching to default"))
- self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
- if len(bound) == 0:
- return None
- if CONF.vswitch_name:
- return self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)[0]\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
- else:
- return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
-
def reboot(self, instance, network_info, reboot_type):
- instance_name = instance["name"]
"""Reboot the specified instance."""
- vm = self._vmutils.lookup(self._conn, instance_name)
+ vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
raise exception.InstanceNotFound(instance_id=instance["id"])
- self._set_vm_state(instance_name, 'Reboot')
+ self._set_vm_state(instance['name'], 'Reboot')
- def destroy(self, instance, network_info=None, cleanup=True):
- """Destroy the VM. Also destroy the associated VHD disk files"""
- instance_name = instance["name"]
- LOG.debug(_("Got request to destroy vm %s"), instance_name)
- vm = self._vmutils.lookup(self._conn, instance_name)
+ def destroy(self, instance, network_info=None, cleanup=True,
+ destroy_disks=True):
+ """Destroy the VM. Also destroy the associated VHD disk files."""
+ LOG.debug(_("Got request to destroy vm %s"), instance['name'])
+ vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
return
- vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Stop the VM first.
- self._set_vm_state(instance_name, 'Disabled')
+ self._set_vm_state(instance['name'], 'Disabled')
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
@@ -405,18 +456,19 @@ class VMOps(baseops.BaseOps):
success = True
if not success:
raise vmutils.HyperVException(_('Failed to destroy vm %s') %
- instance_name)
- #Disconnect volumes
- for volume_drive in volumes_drives_list:
- self._volumeops.disconnect_volume(volume_drive)
- #Delete associated vhd disk files.
- for disk in disk_files:
- vhdfile = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- disk.replace("'", "''") + "'")[0]
- LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
- % locals())
- vhdfile.Delete()
+ instance['name'])
+ if destroy_disks:
+ #Disconnect volumes
+ for volume_drive in volumes_drives_list:
+ self._volumeops.disconnect_volume(volume_drive)
+ #Delete associated vhd disk files.
+ for disk in disk_files:
+ vhdfile = self._conn_cimv2.query(
+ "Select * from CIM_DataFile where Name = '" +
+ disk.replace("'", "''") + "'")[0]
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
+ % {'vhdfile': vhdfile, 'name': instance['name']})
+ vhdfile.Delete()
def pause(self, instance):
"""Pause VM instance."""
@@ -445,12 +497,12 @@ class VMOps(baseops.BaseOps):
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
- """Set the desired state of the VM"""
+ """Set the desired state of the VM."""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 789d74149..d899f977d 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -25,9 +25,8 @@ import sys
import time
import uuid
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt import images
@@ -36,7 +35,7 @@ from nova.virt import images
if sys.platform == 'win32':
import wmi
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -56,9 +55,8 @@ class VMUtils(object):
else:
return vms[0].ElementName
- #TODO(alexpilotti): use the reactor to poll instead of sleep
def check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
@@ -66,21 +64,39 @@ class VMUtils(object):
time.sleep(0.1)
job = wmi.WMI(moniker=job_wmi_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
- LOG.debug(_("WMI job failed: %(ErrorSummaryDescription)s - "
- "%(ErrorDescription)s - %(ErrorCode)s") % job)
+ job_state = job.JobState
+ if job.path().Class == "Msvm_ConcreteJob":
+ err_sum_desc = job.ErrorSummaryDescription
+ err_desc = job.ErrorDescription
+ err_code = job.ErrorCode
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "Error details: %(err_sum_desc)s - %(err_desc)s - "
+ "Error code: %(err_code)d") % locals())
+ else:
+ (error, ret_val) = job.GetError()
+ if not ret_val and error:
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "Error details: %(error)s") % locals())
+ else:
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "No error description available") % locals())
return False
desc = job.Description
elap = job.ElapsedTime
- LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s")
% locals())
return True
+ def get_instance_path(self, instance_name):
+ instance_path = os.path.join(CONF.instances_path, instance_name)
+ if not os.path.exists(instance_path):
+ LOG.debug(_('Creating folder %s '), instance_path)
+ os.makedirs(instance_path)
+ return instance_path
+
def get_vhd_path(self, instance_name):
- base_vhd_folder = os.path.join(CONF.instances_path, instance_name)
- if not os.path.exists(base_vhd_folder):
- LOG.debug(_('Creating folder %s '), base_vhd_folder)
- os.makedirs(base_vhd_folder)
- return os.path.join(base_vhd_folder, instance_name + ".vhd")
+ instance_path = self.get_instance_path(instance_name)
+ return os.path.join(instance_path, instance_name + ".vhd")
def get_base_vhd_path(self, image_name):
base_dir = os.path.join(CONF.instances_path, '_base')
@@ -99,7 +115,7 @@ class VMUtils(object):
return export_folder
def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
- """Clone a WMI object"""
+ """Clone a WMI object."""
cl = conn.__getattr__(wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
@@ -114,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Adds a new resource to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -129,8 +145,20 @@ class VMUtils(object):
else:
return None
+ def modify_virt_resource(self, conn, res_setting_data, target_vm):
+ """Updates a VM resource."""
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ ResourceSettingData=[res_setting_data.GetText_(1)],
+ ComputerSystem=target_vm.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self.check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ return success
+
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Removes a VM resource."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index d2726f871..200236233 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -21,14 +21,13 @@ Management class for Storage-related functions (attach, detach, etc).
import time
from nova import block_device
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import baseops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsV2
LOG = logging.getLogger(__name__)
@@ -39,10 +38,14 @@ hyper_volumeops_opts = [
cfg.IntOpt('hyperv_wait_between_attach_retry',
default=5,
help='The seconds to wait between an volume attachment attempt'),
+ cfg.BoolOpt('force_volumeutils_v1',
+ default=False,
+ help='Force volumeutils v1'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts)
+CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(baseops.BaseOps):
@@ -63,10 +66,27 @@ class VolumeOps(baseops.BaseOps):
CONF.hyperv_attaching_volume_retry_count
self._wait_between_attach_retry = \
CONF.hyperv_wait_between_attach_retry
- self._volutils = volumeutils.VolumeUtils()
+ self._volutils = self._get_volume_utils()
+
+ def _get_volume_utils(self):
+ if(not CONF.force_volumeutils_v1) and \
+ (self._get_hypervisor_version() >= 6.2):
+ return volumeutilsV2.VolumeUtilsV2(
+ self._conn_storage, self._conn_wmi)
+ else:
+ return volumeutils.VolumeUtils(self._conn_wmi)
+
+ def _get_hypervisor_version(self):
+ """Get hypervisor version.
+        :returns: hypervisor version string (ex. 6.2.9200)
+ """
+ version = self._conn_cimv2.Win32_OperatingSystem()[0]\
+ .Version
+ LOG.info(_('Windows version: %s ') % version)
+ return version
def attach_boot_volume(self, block_device_info, vm_name):
- """Attach the boot volume to the IDE controller"""
+ """Attach the boot volume to the IDE controller."""
LOG.debug(_("block device info: %s"), block_device_info)
ebs_root = self._driver.block_device_info_get_mapping(
block_device_info)[0]
@@ -96,7 +116,7 @@ class VolumeOps(baseops.BaseOps):
self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm)
except Exception as exn:
LOG.exception(_('Attach boot from volume failed: %s'), exn)
- self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+ self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(
_('Unable to attach boot volume to instance %s')
% vm_name)
@@ -106,7 +126,7 @@ class VolumeOps(baseops.BaseOps):
block_device_info)
def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach a volume to the SCSI controller"""
+ """Attach a volume to the SCSI controller."""
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
@@ -133,14 +153,14 @@ class VolumeOps(baseops.BaseOps):
mounted_disk, vm)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
- self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+ self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(
_('Unable to attach volume to instance %s')
% instance_name)
def _attach_volume_to_controller(self, controller, address, mounted_disk,
instance):
- """Attach a volume to a controller """
+ """Attach a volume to a controller."""
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -167,7 +187,7 @@ class VolumeOps(baseops.BaseOps):
return len(volumes)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Dettach a volume to the SCSI controller"""
+        """Detach a volume from the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
@@ -199,7 +219,7 @@ class VolumeOps(baseops.BaseOps):
_('Failed to remove volume from VM %s') %
instance_name)
#Sending logout
- self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+ self._volutils.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index b4ec7dc7d..051c37fd6 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -20,64 +20,39 @@ Helper methods for operations related to the management of volumes,
and storage repositories
"""
-import subprocess
-import sys
import time
-from nova import block_device
-from nova import config
-from nova import flags
+from eventlet.green import subprocess
+
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt import driver
+from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import _winreg
-
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
+
+
+class VolumeUtils(basevolumeutils.BaseVolumeUtils):
+ def __init__(self, conn_wmi):
+ self._conn_wmi = conn_wmi
-class VolumeUtils(object):
def execute(self, *args, **kwargs):
+ _PIPE = subprocess.PIPE # pylint: disable=E1101
proc = subprocess.Popen(
[args],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
+ stdin=_PIPE,
+ stdout=_PIPE,
+ stderr=_PIPE,
)
stdout_value, stderr_value = proc.communicate()
if stdout_value.find('The operation completed successfully') == -1:
raise vmutils.HyperVException(_('An error has occurred when '
'calling the iscsi initiator: %s') % stdout_value)
- def get_iscsi_initiator(self, cim_conn):
- """Get iscsi initiator name for this machine"""
-
- computer_system = cim_conn.Win32_ComputerSystem()[0]
- hostname = computer_system.name
- keypath = \
- r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
- try:
- key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
- _winreg.KEY_ALL_ACCESS)
- temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
- initiator_name = str(temp[0])
- _winreg.CloseKey(key)
- except Exception:
- LOG.info(_("The ISCSI initiator name can't be found. "
- "Choosing the default one"))
- computer_system = cim_conn.Win32_ComputerSystem()[0]
- initiator_name = "iqn.1991-05.com.microsoft:" + \
- hostname.lower()
- return {
- 'ip': CONF.my_ip,
- 'initiator': initiator_name,
- }
-
def login_storage_target(self, target_lun, target_iqn, target_portal):
- """Add target portal, list targets and logins to the target"""
+        """Add target portal, list targets and log in to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -89,35 +64,18 @@ class VolumeUtils(object):
self.execute('iscsicli.exe ' + 'LisTargets')
#Sending login
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
- #Waiting the disk to be mounted. Research this
+        #Waiting for the disk to be mounted. Research this to avoid sleep
time.sleep(CONF.hyperv_wait_between_attach_retry)
- def logout_storage_target(self, _conn_wmi, target_iqn):
- """ Logs out storage target through its session id """
+ def logout_storage_target(self, target_iqn):
+ """Logs out storage target through its session id."""
- sessions = _conn_wmi.query(
+ sessions = self._conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
WHERE TargetName='" + target_iqn + "'")
for session in sessions:
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
- """ Executes log out of the session described by its session ID """
+ """Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
-
- def volume_in_mapping(self, mount_device, block_device_info):
- block_device_list = [block_device.strip_dev(vol['mount_device'])
- for vol in
- driver.block_device_info_get_mapping(
- block_device_info)]
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- block_device_list.append(
- block_device.strip_dev(swap['device_name']))
- block_device_list += [block_device.strip_dev(
- ephemeral['device_name'])
- for ephemeral in
- driver.block_device_info_get_ephemerals(block_device_info)]
-
- LOG.debug(_("block_device_list %s"), block_device_list)
- return block_device.strip_dev(mount_device) in block_device_list
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
new file mode 100644
index 000000000..6f5bcdac9
--- /dev/null
+++ b/nova/virt/hyperv/volumeutilsV2.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories for Windows 2012
+"""
+import time
+
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import basevolumeutils
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
+
+ def __init__(self, conn_storage, conn_wmi):
+ self._conn_storage = conn_storage
+ self._conn_wmi = conn_wmi
+
+ def login_storage_target(self, target_lun, target_iqn,
+ target_portal):
+        """Add target portal, list targets and log in to the target."""
+ separator = target_portal.find(':')
+ target_address = target_portal[:separator]
+ target_port = target_portal[separator + 1:]
+ #Adding target portal to iscsi initiator. Sending targets
+ portal = self._conn_storage.__getattr__("MSFT_iSCSITargetPortal")
+ portal.New(TargetPortalAddress=target_address,
+ TargetPortalPortNumber=target_port)
+ #Connecting to the target
+ target = self._conn_storage.__getattr__("MSFT_iSCSITarget")
+ target.Connect(NodeAddress=target_iqn,
+ IsPersistent=True)
+        #Waiting for the disk to be mounted. Research this to avoid sleep
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
+
+ def logout_storage_target(self, target_iqn):
+ """Logs out storage target through its session id."""
+
+ target = self._conn_storage.MSFT_iSCSITarget(
+ NodeAddress=target_iqn)[0]
+ if target.IsConnected:
+ session = self._conn_storage.MSFT_iSCSISession(
+ TargetNodeAddress=target_iqn)[0]
+ if session.IsPersistent:
+ session.Unregister()
+ target.Disconnect()
+
+ def execute_log_out(self, session_id):
+ session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
+ SessionId=session_id)[0]
+ self.logout_storage_target(session.TargetName)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index f0ed3ba68..f80c19999 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -24,9 +24,7 @@ Handling of VM disk images.
import os
import re
-from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -41,7 +39,7 @@ image_opts = [
help='Force backing images to raw format'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(image_opts)
@@ -178,13 +176,16 @@ class QemuImgInfo(object):
def qemu_img_info(path):
"""Return a object containing the parsed output from qemu-img info."""
+ if not os.path.exists(path):
+ return QemuImgInfo()
+
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
return QemuImgInfo(out)
def convert_image(source, dest, out_format):
- """Convert image to other format"""
+ """Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd)
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 4c3483cb9..6785c8823 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 Red Hat, Inc.
+# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -18,7 +18,11 @@
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
-and support conversion to/from XML
+and support conversion to/from XML. These classes are solely concerned
+by providing direct Object <-> XML document conversions. No policy or
+operational decisions should be made by code in these classes. Such
+policy belongs in the 'designer.py' module which provides simplified
+helpers for populating up config object instances.
"""
from nova import exception
@@ -106,6 +110,7 @@ class LibvirtConfigCapsHost(LibvirtConfigObject):
**kwargs)
self.cpu = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
@@ -115,10 +120,14 @@ class LibvirtConfigCapsHost(LibvirtConfigObject):
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
+ elif c.tag == "uuid":
+ self.uuid = c.text
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
+ if self.uuid:
+ caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
@@ -337,6 +346,101 @@ class LibvirtConfigGuestCPU(LibvirtConfigCPU):
return cpu
+class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
+ **kwargs)
+
+ self.mode = "sysinfo"
+
+ def format_dom(self):
+ smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
+ smbios.set("mode", self.mode)
+
+ return smbios
+
+
+class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
+ **kwargs)
+
+ self.type = "smbios"
+ self.bios_vendor = None
+ self.bios_version = None
+ self.system_manufacturer = None
+ self.system_product = None
+ self.system_version = None
+ self.system_serial = None
+ self.system_uuid = None
+
+ def format_dom(self):
+ sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
+
+ sysinfo.set("type", self.type)
+
+ bios = None
+ system = None
+
+ if self.bios_vendor is not None:
+ if bios is None:
+ bios = etree.Element("bios")
+ info = etree.Element("entry", name="vendor")
+ info.text = self.bios_vendor
+ bios.append(info)
+
+ if self.bios_version is not None:
+ if bios is None:
+ bios = etree.Element("bios")
+ info = etree.Element("entry", name="version")
+ info.text = self.bios_version
+ bios.append(info)
+
+ if self.system_manufacturer is not None:
+ if system is None:
+ system = etree.Element("system")
+ info = etree.Element("entry", name="manufacturer")
+ info.text = self.system_manufacturer
+ system.append(info)
+
+ if self.system_product is not None:
+ if system is None:
+ system = etree.Element("system")
+ info = etree.Element("entry", name="product")
+ info.text = self.system_product
+ system.append(info)
+
+ if self.system_version is not None:
+ if system is None:
+ system = etree.Element("system")
+ info = etree.Element("entry", name="version")
+ info.text = self.system_version
+ system.append(info)
+
+ if self.system_serial is not None:
+ if system is None:
+ system = etree.Element("system")
+ info = etree.Element("entry", name="serial")
+ info.text = self.system_serial
+ system.append(info)
+
+ if self.system_uuid is not None:
+ if system is None:
+ system = etree.Element("system")
+ info = etree.Element("entry", name="uuid")
+ info.text = self.system_uuid
+ system.append(info)
+
+ if bios is not None:
+ sysinfo.append(bios)
+ if system is not None:
+ sysinfo.append(system)
+
+ return sysinfo
+
+
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
@@ -449,6 +553,7 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
self.vportparams = []
self.filtername = None
self.filterparams = []
+ self.driver_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
@@ -457,16 +562,22 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
+
+ if self.driver_name:
+ dev.append(etree.Element("driver", name=self.driver_name))
+
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
- dev.append(etree.Element("target", dev=self.target_dev))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode="private"))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
+ if self.target_dev is not None:
+ dev.append(etree.Element("target", dev=self.target_dev))
+
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
@@ -585,7 +696,9 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.vcpus = 1
self.cpu = None
self.acpi = False
+ self.apic = False
self.clock = None
+ self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_kernel = None
@@ -594,6 +707,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.os_root = None
self.os_init_path = None
self.os_boot_dev = None
+ self.os_smbios = None
self.devices = []
def _format_basic_props(self, root):
@@ -619,12 +733,17 @@ class LibvirtConfigGuest(LibvirtConfigObject):
os.append(self._text_node("init", self.os_init_path))
if self.os_boot_dev is not None:
os.append(etree.Element("boot", dev=self.os_boot_dev))
+ if self.os_smbios is not None:
+ os.append(self.os_smbios.format_dom())
root.append(os)
def _format_features(self, root):
- if self.acpi:
+ if self.acpi or self.apic:
features = etree.Element("features")
- features.append(etree.Element("acpi"))
+ if self.acpi:
+ features.append(etree.Element("acpi"))
+ if self.apic:
+ features.append(etree.Element("apic"))
root.append(features)
def _format_devices(self, root):
@@ -641,6 +760,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
root.set("type", self.virt_type)
self._format_basic_props(root)
+
+ if self.sysinfo is not None:
+ root.append(self.sysinfo.format_dom())
+
self._format_os(root)
self._format_features(root)
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
new file mode 100644
index 000000000..b832db4fa
--- /dev/null
+++ b/nova/virt/libvirt/designer.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Policy based configuration of libvirt objects
+
+This module provides helper APIs for populating the config.py
+classes based on common operational needs / policies
+"""
+
+from nova.virt import netutils
+
+
+def set_vif_guest_frontend_config(conf, mac, model, driver):
+ """Populate a LibvirtConfigGuestInterface instance
+ with guest frontend details"""
+ conf.mac_addr = mac
+ if model is not None:
+ conf.model = model
+ if driver is not None:
+ conf.driver_name = driver
+
+
+def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for a software bridge"""
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ethernet_config(conf, tapname):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an externally configured
+ host device.
+
+    NB use of this configuration is discouraged by the
+    libvirt project and will mark domains as 'tainted'"""
+
+ conf.net_type = "ethernet"
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an OpenVSwitch bridge"""
+
+ conf.net_type = "bridge"
+ conf.source_dev = brname
+ conf.vporttype = "openvswitch"
+ conf.add_vport_param("interfaceid", interfaceid)
+ if tapname:
+ conf.target_dev = tapname
+ conf.script = ""
+
+
+def set_vif_host_backend_filter_config(conf, name,
+ primary_addr,
+ dhcp_server=None,
+ ra_server=None,
+ allow_same_net=False,
+ ipv4_cidr=None,
+ ipv6_cidr=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for traffic filtering"""
+
+ conf.filtername = name
+ conf.add_filter_param("IP", primary_addr)
+
+ if dhcp_server:
+ conf.add_filter_param("DHCPSERVER", dhcp_server)
+
+ if ra_server:
+ conf.add_filter_param("RASERVER", ra_server)
+
+ if allow_same_net:
+ if ipv4_cidr:
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ conf.add_filter_param("PROJNET", net)
+ conf.add_filter_param("PROJMASK", mask)
+
+ if ipv6_cidr:
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ conf.add_filter_param("PROJNET6", net)
+ conf.add_filter_param("PROJMASK6", prefix)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7f962b45d..4312086a8 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -43,7 +43,6 @@ import errno
import functools
import glob
import hashlib
-import multiprocessing
import os
import shutil
import sys
@@ -57,13 +56,11 @@ from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
-from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
-from nova import config
from nova import context as nova_context
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
@@ -71,7 +68,9 @@ from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common.notifier import api as notifier
from nova import utils
+from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
@@ -149,7 +148,8 @@ libvirt_opts = [
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
- 'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver'
+ 'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
+ 'nfs=nova.virt.libvirt.volume_nfs.NfsVolumeDriver'
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
@@ -183,10 +183,17 @@ libvirt_opts = [
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
+ cfg.StrOpt('xen_hvmloader_path',
+ default='/usr/lib/xen/boot/hvmloader',
+ help='Location where the Xen hvmloader is kept'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
@@ -194,6 +201,8 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
+MAX_CONSOLE_BYTES = 102400
+
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
@@ -254,6 +263,7 @@ class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
@@ -268,7 +278,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._wrapped_conn = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
- default=DEFAULT_FIREWALL_DRIVER,
+ DEFAULT_FIREWALL_DRIVER,
+ self.virtapi,
get_connection=self._get_connection)
self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
self.volume_drivers = {}
@@ -393,10 +404,22 @@ class LibvirtDriver(driver.ComputeDriver):
_connect_auth_cb,
None]
- if read_only:
- return libvirt.openReadOnly(uri)
- else:
- return libvirt.openAuth(uri, auth, 0)
+ try:
+ if read_only:
+ return libvirt.openReadOnly(uri)
+ else:
+ return libvirt.openAuth(uri, auth, 0)
+ except libvirt.libvirtError as ex:
+ LOG.exception(_("Connection to libvirt failed: %s"), ex)
+ payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
+ method='_connect',
+ reason=ex)
+ notifier.notify(nova_context.get_admin_context(),
+ notifier.publisher_id('compute'),
+ 'compute.libvirt.error',
+ notifier.ERROR,
+ payload)
+ pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
@@ -410,6 +433,9 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.NovaException:
return False
+ def legacy_nwinfo(self):
+ return True
+
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
@@ -427,8 +453,17 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError:
# Instance was deleted while listing... ignore it
pass
+
+ # extend instance list to contain also defined domains
+ names.extend([vm for vm in self._conn.listDefinedDomains()
+ if vm not in names])
+
return names
+ def list_instance_uuids(self):
+ return [self._conn.lookupByName(name).UUIDString()
+ for name in self.list_instances()]
+
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
@@ -486,12 +521,13 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_destroy)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_destroy)
timer.start(interval=0.5).wait()
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
self._destroy(instance)
- self._cleanup(instance, network_info, block_device_info)
+ self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
@@ -524,7 +560,8 @@ class LibvirtDriver(driver.ComputeDriver):
locals(), instance=instance)
raise
- def _cleanup(self, instance, network_info, block_device_info):
+ def _cleanup(self, instance, network_info, block_device_info,
+ destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
try:
@@ -548,35 +585,31 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- target = os.path.join(CONF.instances_path, instance['name'])
- LOG.info(_('Deleting instance files %(target)s') % locals(),
- instance=instance)
- if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- instance['name'],
- 'rootfs')
- disk.destroy_container(container_dir=container_dir)
- if os.path.exists(target):
- # If we fail to get rid of the directory
- # tree, this shouldn't block deletion of
- # the instance as whole.
- try:
- shutil.rmtree(target)
- except OSError, e:
- LOG.error(_("Failed to cleanup directory %(target)s: %(e)s") %
- locals())
+ if destroy_disks:
+ target = libvirt_utils.get_instance_path(instance)
+ LOG.info(_('Deleting instance files %(target)s') % locals(),
+ instance=instance)
+ if os.path.exists(target):
+ # If we fail to get rid of the directory
+ # tree, this shouldn't block deletion of
+ # the instance as whole.
+ try:
+ shutil.rmtree(target)
+ except OSError, e:
+ LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
+ ) % locals())
- #NOTE(bfilippov): destroy all LVM disks for this instance
- self._cleanup_lvm(instance)
+ #NOTE(bfilippov): destroy all LVM disks for this instance
+ self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
- """Delete all LVM disks for given instance object"""
+ """Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
- """Returns all LVM disks for given instance object"""
+ """Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
@@ -609,8 +642,7 @@ class LibvirtDriver(driver.ComputeDriver):
}
def _cleanup_resize(self, instance, network_info):
- target = os.path.join(CONF.instances_path,
- instance['name'] + "_resize")
+ target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
@@ -628,49 +660,40 @@ class LibvirtDriver(driver.ComputeDriver):
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
- @exception.wrap_exception()
- def attach_volume(self, connection_info, instance_name, mountpoint):
+ def attach_volume(self, connection_info, instance, mountpoint):
+ instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
conf = self.volume_driver_method('connect_volume',
connection_info,
mount_device)
- if CONF.libvirt_type == 'lxc':
- self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
- # TODO(danms) once libvirt has support for LXC hotplug,
- # replace this re-define with use of the
- # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
- # attachDevice()
- domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
- self._conn.defineXML(domxml)
- else:
- try:
- # NOTE(vish): We can always affect config because our
- # domains are persistent, but we should only
- # affect live if the domain is running.
- flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
- state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
- if state == power_state.RUNNING:
- flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
- virt_dom.attachDeviceFlags(conf.to_xml(), flags)
- except Exception, ex:
- if isinstance(ex, libvirt.libvirtError):
- errcode = ex.get_error_code()
- if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
- self.volume_driver_method('disconnect_volume',
- connection_info,
- mount_device)
- raise exception.DeviceIsBusy(device=mount_device)
-
- with excutils.save_and_reraise_exception():
+ try:
+ # NOTE(vish): We can always affect config because our
+ # domains are persistent, but we should only
+ # affect live if the domain is running.
+ flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+ state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+ if state == power_state.RUNNING:
+ flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+ virt_dom.attachDeviceFlags(conf.to_xml(), flags)
+ except Exception, ex:
+ if isinstance(ex, libvirt.libvirtError):
+ errcode = ex.get_error_code()
+ if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
- connection_info,
- mount_device)
+ connection_info,
+ mount_device)
+ raise exception.DeviceIsBusy(device=mount_device)
+
+ with excutils.save_and_reraise_exception():
+ self.volume_driver_method('disconnect_volume',
+ connection_info,
+ mount_device)
@staticmethod
def _get_disk_xml(xml, device):
- """Returns the xml for the disk mounted at device"""
+ """Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
@@ -691,22 +714,14 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info)
return xml
- @exception.wrap_exception()
- def detach_volume(self, connection_info, instance_name, mountpoint):
+ def detach_volume(self, connection_info, instance, mountpoint):
+ instance_name = instance['name']
mount_device = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
raise exception.DiskNotFound(location=mount_device)
- if CONF.libvirt_type == 'lxc':
- self._detach_lxc_volume(xml, virt_dom, instance_name)
- # TODO(danms) once libvirt has support for LXC hotplug,
- # replace this re-define with use of the
- # VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
- # detachDevice()
- domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
- self._conn.defineXML(domxml)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
@@ -731,56 +746,7 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- @exception.wrap_exception()
- def _attach_lxc_volume(self, xml, virt_dom, instance_name):
- LOG.info(_('attaching LXC block device'))
-
- lxc_container_root = self.get_lxc_container_root(virt_dom)
- lxc_host_volume = self.get_lxc_host_device(xml)
- lxc_container_device = self.get_lxc_container_target(xml)
- lxc_container_target = "%s/%s" % (lxc_container_root,
- lxc_container_device)
-
- if lxc_container_target:
- disk.bind(lxc_host_volume, lxc_container_target, instance_name)
-
- @exception.wrap_exception()
- def _detach_lxc_volume(self, xml, virt_dom, instance_name):
- LOG.info(_('detaching LXC block device'))
-
- lxc_container_root = self.get_lxc_container_root(virt_dom)
- lxc_container_device = self.get_lxc_container_target(xml)
- lxc_container_target = "%s/%s" % (lxc_container_root,
- lxc_container_device)
-
- if lxc_container_target:
- disk.unbind(lxc_container_target)
-
- @staticmethod
- def get_lxc_container_root(virt_dom):
- xml = virt_dom.XMLDesc(0)
- doc = etree.fromstring(xml)
- filesystem_block = doc.findall('./devices/filesystem')
- for cnt, filesystem_nodes in enumerate(filesystem_block):
- return filesystem_nodes[cnt].get('dir')
-
- @staticmethod
- def get_lxc_host_device(xml):
- dom = minidom.parseString(xml)
-
- for device in dom.getElementsByTagName('source'):
- return device.getAttribute('dev')
-
- @staticmethod
- def get_lxc_container_target(xml):
- dom = minidom.parseString(xml)
-
- for device in dom.getElementsByTagName('target'):
- filesystem = device.getAttribute('dev')
- return 'dev/%s' % filesystem
-
- @exception.wrap_exception()
- def snapshot(self, context, instance, image_href):
+ def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
@@ -788,7 +754,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
- raise exception.InstanceNotRunning()
+ raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
@@ -848,6 +814,7 @@ class LibvirtDriver(driver.ComputeDriver):
image_type=source_format)
snapshot.create()
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# Export the snapshot to a raw image
snapshot_directory = CONF.libvirt_snapshots_directory
@@ -865,13 +832,15 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain(domain=virt_dom)
# Upload that image to the image service
+
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
- @exception.wrap_exception()
def reboot(self, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
@@ -917,7 +886,8 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
return True
greenthread.sleep(1)
@@ -954,55 +924,49 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_reboot)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
- @exception.wrap_exception()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
- @exception.wrap_exception()
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._destroy(instance)
- @exception.wrap_exception()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def suspend(self, instance):
- """Suspend the specified instance"""
+ """Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
- @exception.wrap_exception()
- def resume(self, instance):
- """resume the specified instance"""
- dom = self._lookup_by_name(instance['name'])
- self._create_domain(domain=dom)
+ def resume(self, instance, network_info, block_device_info=None):
+ """resume the specified instance."""
+ xml = self._get_domain_xml(instance, network_info, block_device_info)
+ self._create_domain_and_network(xml, instance, network_info,
+ block_device_info)
- @exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- @exception.wrap_exception()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
@@ -1013,11 +977,9 @@ class LibvirtDriver(driver.ComputeDriver):
data recovery.
"""
-
+ instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_domain_xml(instance, network_info)
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
@@ -1033,31 +995,23 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
self._create_domain(xml)
- @exception.wrap_exception()
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
- unrescue_xml_path = os.path.join(CONF.instances_path,
- instance['name'],
- 'unrescue.xml')
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
- rescue_files = os.path.join(CONF.instances_path, instance['name'],
- "*.rescue")
+ rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
- @exception.wrap_exception()
def poll_rebooting_instances(self, timeout, instances):
pass
- @exception.wrap_exception()
- def poll_rescued_instances(self, timeout):
- pass
-
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
@@ -1069,12 +1023,13 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
- @exception.wrap_exception()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
xml = self.to_xml(instance, network_info, image_meta,
block_device_info=block_device_info)
- self._create_image(context, instance, xml, network_info=network_info,
+ if image_meta:
+ self._create_image(context, instance, xml,
+ network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
@@ -1091,7 +1046,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
raise utils.LoopingCallDone()
- timer = utils.LoopingCall(_wait_for_boot)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
@@ -1108,7 +1063,6 @@ class LibvirtDriver(driver.ComputeDriver):
fp.write(data)
return fpath
- @exception.wrap_exception()
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
@@ -1136,7 +1090,14 @@ class LibvirtDriver(driver.ComputeDriver):
if not path:
continue
libvirt_utils.chown(path, os.getuid())
- return libvirt_utils.load_file(path)
+
+ with libvirt_utils.file_open(path, 'rb') as fp:
+ log_data, remaining = utils.last_bytes(fp,
+ MAX_CONSOLE_BYTES)
+ if remaining > 0:
+ LOG.info(_('Truncated console log returned, %d bytes '
+ 'ignored'), remaining, instance=instance)
+ return log_data
# Try 'pty' types
if console_types.get('pty'):
@@ -1152,18 +1113,22 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
- console_log = self._get_console_log_path(instance['name'])
+ console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
- return libvirt_utils.load_file(fpath)
+ with libvirt_utils.file_open(fpath, 'rb') as fp:
+ log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
+ if remaining > 0:
+ LOG.info(_('Truncated console log returned, %d bytes ignored'),
+ remaining, instance=instance)
+ return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
- @exception.wrap_exception()
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
@@ -1219,7 +1184,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
- """Create a blank image of specified size"""
+ """Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
@@ -1235,17 +1200,17 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb):
- """Create a swap file of specified size"""
+ """Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
- def _get_console_log_path(instance_name):
- return os.path.join(CONF.instances_path, instance_name,
- 'console.log')
+ def _get_console_log_path(instance):
+ return os.path.join(libvirt_utils.get_instance_path(instance),
+ 'console.log')
- def _chown_console_log_for_instance(self, instance_name):
- console_log = self._get_console_log_path(instance_name)
+ def _chown_console_log_for_instance(self, instance):
+ console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
@@ -1257,12 +1222,11 @@ class LibvirtDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(CONF.instances_path,
- instance['name'],
+ return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
- return self.image_backend.image(instance['name'],
+ return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
@@ -1274,17 +1238,12 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_('Creating image'), instance=instance)
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
- if CONF.libvirt_type == 'lxc':
- container_dir = os.path.join(CONF.instances_path,
- instance['name'],
- 'rootfs')
- fileutils.ensure_tree(container_dir)
-
# NOTE(dprince): for rescue console.log may already exist... chown it.
- self._chown_console_log_for_instance(instance['name'])
+ self._chown_console_log_for_instance(instance)
# NOTE(vish): No need add the suffix to console.log
- libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
+ libvirt_utils.write_to_file(
+ self._get_console_log_path(instance), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
@@ -1311,8 +1270,7 @@ class LibvirtDriver(driver.ComputeDriver):
root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
size = instance['root_gb'] * 1024 * 1024 * 1024
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
+ inst_type = instance['instance_type']
if size == 0 or suffix == '.rescue':
size = None
@@ -1326,6 +1284,11 @@ class LibvirtDriver(driver.ComputeDriver):
user_id=instance['user_id'],
project_id=instance['project_id'])
+ # Lookup the filesystem type if required
+ os_type_with_default = instance['os_type']
+ if not os_type_with_default:
+ os_type_with_default = 'default'
+
ephemeral_gb = instance['ephemeral_gb']
if ephemeral_gb and not self._volume_in_mapping(
self.default_second_device, block_device_info):
@@ -1333,9 +1296,7 @@ class LibvirtDriver(driver.ComputeDriver):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
- fname = "ephemeral_%s_%s_%s" % ("0",
- ephemeral_gb,
- instance["os_type"])
+ fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
@@ -1349,9 +1310,7 @@ class LibvirtDriver(driver.ComputeDriver):
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
- fname = "ephemeral_%s_%s_%s" % (eph['num'],
- eph['size'],
- instance["os_type"])
+ fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(_get_eph_disk(eph)).cache(fetch_func=fn,
filename=fname,
size=size,
@@ -1404,14 +1363,17 @@ class LibvirtDriver(driver.ComputeDriver):
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
- cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
- try:
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
- cdb.make_drive(configdrive_path)
- finally:
- cdb.cleanup()
+
+ try:
+ cdb.make_drive(configdrive_path)
+ except exception.ProcessExecutionError, e:
+ LOG.error(_('Creating config drive failed with error: %s'),
+ e, instance=instance)
+ raise
elif any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
@@ -1435,13 +1397,8 @@ class LibvirtDriver(driver.ComputeDriver):
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
- if CONF.libvirt_type == 'lxc':
- disk.setup_container(basepath('disk'),
- container_dir=container_dir,
- use_cow=CONF.use_cow_images)
-
if CONF.libvirt_type == 'uml':
- libvirt_utils.chown(basepath('disk'), 'root')
+ libvirt_utils.chown(image('disk').path, 'root')
@staticmethod
def _volume_in_mapping(mount_device, block_device_info):
@@ -1470,6 +1427,11 @@ class LibvirtDriver(driver.ComputeDriver):
caps.parse_str(xmlstr)
return caps
+ def get_host_uuid(self):
+ """Returns a UUID representing the host."""
+ caps = self.get_host_capabilities()
+ return caps.host.uuid
+
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
@@ -1488,6 +1450,7 @@ class LibvirtDriver(driver.ComputeDriver):
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
+ guestcpu.features.append(guestfeat)
return guestcpu
@@ -1553,9 +1516,8 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
- fs.source_dir = os.path.join(CONF.instances_path,
- instance['name'],
- 'rootfs')
+ fs.source_dir = os.path.join(
+ libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if image_meta and image_meta.get('disk_format') == 'iso':
@@ -1573,8 +1535,7 @@ class LibvirtDriver(driver.ComputeDriver):
def disk_info(name, disk_dev, disk_bus=default_disk_bus,
device_type="disk"):
- image = self.image_backend.image(instance['name'],
- name)
+ image = self.image_backend.image(instance, name)
return image.libvirt_info(disk_bus,
disk_dev,
device_type,
@@ -1661,15 +1622,26 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(CONF.instances_path,
- instance['name'],
- "disk.config")
+            diskconfig.source_path = os.path.join(
+                libvirt_utils.get_instance_path(instance), "disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
devices.append(diskconfig)
return devices
+ def get_guest_config_sysinfo(self, instance):
+ sysinfo = vconfig.LibvirtConfigGuestSysinfo()
+
+ sysinfo.system_manufacturer = version.vendor_string()
+ sysinfo.system_product = version.product_string()
+ sysinfo.system_version = version.version_string_with_package()
+
+ sysinfo.system_serial = self.get_host_uuid()
+ sysinfo.system_uuid = instance['uuid']
+
+ return sysinfo
+
def get_guest_config(self, instance, network_info, image_meta, rescue=None,
block_device_info=None):
"""Get config data for parameters.
@@ -1678,10 +1650,8 @@ class LibvirtDriver(driver.ComputeDriver):
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
- # FIXME(vish): stick this in db
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id,
- inactive=True)
+ inst_type = instance['instance_type']
+ inst_path = libvirt_utils.get_instance_path(instance)
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
@@ -1716,7 +1686,13 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
- guest.os_loader = '/usr/lib/xen/boot/hvmloader'
+ guest.os_loader = CONF.xen_hvmloader_path
+
+ if CONF.libvirt_type in ("kvm", "qemu"):
+ caps = self.get_host_capabilities()
+ if caps.host.cpu.arch in ("i686", "x86_64"):
+ guest.sysinfo = self.get_guest_config_sysinfo(instance)
+ guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
@@ -1734,31 +1710,30 @@ class LibvirtDriver(driver.ComputeDriver):
if rescue:
if rescue.get('kernel_id'):
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel.rescue")
+ guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
+ if CONF.libvirt_type == "xen":
+ guest.os_cmdline = "ro"
+ else:
+ guest.os_cmdline = ("root=%s console=ttyS0" %
+ (root_device_name or "/dev/vda",))
+
if rescue.get('ramdisk_id'):
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk.rescue")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
- guest.os_kernel = os.path.join(CONF.instances_path,
- instance['name'],
- "kernel")
+ guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
- guest.os_cmdline = "root=%s console=ttyS0" % (
- root_device_name or "/dev/vda",)
+ guest.os_cmdline = ("root=%s console=ttyS0" %
+ (root_device_name or "/dev/vda",))
if instance['ramdisk_id']:
- guest.os_initrd = os.path.join(CONF.instances_path,
- instance['name'],
- "ramdisk")
+ guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
+ guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
@@ -1788,7 +1763,9 @@ class LibvirtDriver(driver.ComputeDriver):
guest.add_device(cfg)
for (network, mapping) in network_info:
- cfg = self.vif_driver.plug(instance, (network, mapping))
+ self.vif_driver.plug(instance, (network, mapping))
+ cfg = self.vif_driver.get_config(instance,
+ network, mapping)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
@@ -1798,9 +1775,7 @@ class LibvirtDriver(driver.ComputeDriver):
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = os.path.join(CONF.instances_path,
- instance['name'],
- "console.log")
+ consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
@@ -1869,16 +1844,40 @@ class LibvirtDriver(driver.ComputeDriver):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
- def _create_domain(self, xml=None, domain=None, launch_flags=0):
+ def _create_domain(self, xml=None, domain=None,
+ instance=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
+ inst_path = None
+ if instance:
+ inst_path = libvirt_utils.get_instance_path(instance)
+
+ if CONF.libvirt_type == 'lxc':
+ if not inst_path:
+ inst_path = None
+
+ container_dir = os.path.join(inst_path, 'rootfs')
+ fileutils.ensure_tree(container_dir)
+ image = self.image_backend.image(instance, 'disk')
+ disk.setup_container(image.path,
+ container_dir=container_dir,
+ use_cow=CONF.use_cow_images)
+
if xml:
domain = self._conn.defineXML(xml)
domain.createWithFlags(launch_flags)
self._enable_hairpin(domain.XMLDesc(0))
+
+ # NOTE(uni): Now the container is running with its own private mount
+ # namespace and so there is no need to keep the container rootfs
+ # mounted in the host namespace
+ if CONF.libvirt_type == 'lxc':
+ container_dir = os.path.join(inst_path, 'rootfs')
+ disk.teardown_container(container_dir=container_dir)
+
return domain
def _create_domain_and_network(self, xml, instance, network_info,
@@ -1898,7 +1897,8 @@ class LibvirtDriver(driver.ComputeDriver):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- domain = self._create_domain(xml)
+ domain = self._create_domain(xml, instance=instance)
+
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
@@ -1971,21 +1971,18 @@ class LibvirtDriver(driver.ComputeDriver):
return interfaces
- @staticmethod
- def get_vcpu_total():
+ def get_vcpu_total(self):
"""Get vcpu number of physical computer.
:returns: the number of cpu core.
"""
- # On certain platforms, this will raise a NotImplementedError.
try:
- return multiprocessing.cpu_count()
- except NotImplementedError:
+ return self._conn.getInfo()[2]
+ except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "
- "This error can be safely ignored for now."))
+ "function is not implemented for this platform. "))
return 0
def get_memory_mb_total(self):
@@ -2008,26 +2005,42 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_total = libvirt_utils.volume_group_total_space(
+ CONF.libvirt_images_volume_group)
+ return vg_total / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
+ """Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
- for dom_id in self.list_instance_ids():
- dom = self._conn.lookupByID(dom_id)
- vcpus = dom.vcpus()
- if vcpus is None:
- # dom.vcpus is not implemented for lxc, but returning 0 for
- # a used count is hardly useful for something measuring usage
- total += 1
- else:
- total += len(vcpus[1])
+ dom_ids = self.list_instance_ids()
+ for dom_id in dom_ids:
+ try:
+ dom = self._conn.lookupByID(dom_id)
+ vcpus = dom.vcpus()
+ if vcpus is None:
+ # dom.vcpus is not implemented for lxc, but returning 0 for
+ # a used count is hardly useful for something measuring
+ # usage
+ total += 1
+ else:
+ total += len(vcpus[1])
+ except libvirt.libvirtError as err:
+ if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ LOG.debug(_("List of domains returned by libVirt: %s")
+ % dom_ids)
+ LOG.warn(_("libVirt can't find a domain with id: %s")
+ % dom_id)
+ continue
+ raise
# NOTE(gtt116): give change to do other task.
greenthread.sleep(0)
return total
@@ -2077,8 +2090,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_used = libvirt_utils.volume_group_used_space(
+ CONF.libvirt_images_volume_group)
+ return vg_used / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
"""Get hypervisor type.
@@ -2170,12 +2188,50 @@ class LibvirtDriver(driver.ComputeDriver):
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
+ def get_all_volume_usage(self, context, compute_host_bdms):
+ """Return usage info for volumes attached to vms on
+ a given host"""
+ vol_usage = []
+
+ for instance_bdms in compute_host_bdms:
+ instance = instance_bdms['instance']
+
+ for bdm in instance_bdms['instance_bdms']:
+ vol_stats = []
+ mountpoint = bdm['device_name']
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+
+ LOG.debug(_("Trying to get stats for the volume %s"),
+ bdm['volume_id'])
+ vol_stats = self.block_stats(instance['name'], mountpoint)
+
+ if vol_stats:
+ rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+ vol_usage.append(dict(volume=bdm['volume_id'],
+ instance=instance,
+ rd_req=rd_req,
+ rd_bytes=rd_bytes,
+ wr_req=wr_req,
+ wr_bytes=wr_bytes,
+ flush_operations=flush_ops))
+ return vol_usage
+
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
- domain = self._lookup_by_name(instance_name)
- return domain.blockStats(disk)
+ try:
+ domain = self._lookup_by_name(instance_name)
+ return domain.blockStats(disk)
+ except libvirt.libvirtError as e:
+ errcode = e.get_error_code()
+ LOG.info(_("Getting block stats failed, device might have "
+ "been detached. Code=%(errcode)s Error=%(e)s")
+ % locals())
+ except exception.InstanceNotFound:
+ LOG.info(_("Could not find domain in libvirt for instance %s. "
+ "Cannot get block stats for device") % instance_name)
def interface_stats(self, instance_name, interface):
"""
@@ -2204,12 +2260,13 @@ class LibvirtDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
+ :param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
dic = {'vcpus': self.get_vcpu_total(),
@@ -2283,6 +2340,7 @@ class LibvirtDriver(driver.ComputeDriver):
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
+ is_volume_backed = dest_check_data.get('is_volume_backed', False)
shared = self._check_shared_storage_test_file(filename)
@@ -2295,10 +2353,12 @@ class LibvirtDriver(driver.ComputeDriver):
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
- elif not shared:
+ elif not shared and not is_volume_backed:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
+ dest_check_data.update({"is_shared_storage": shared})
+ return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance_ref,
available_mb, disk_over_commit):
@@ -2350,6 +2410,11 @@ class LibvirtDriver(driver.ComputeDriver):
None. if given cpu info is not compatible to this server,
raise exception.
"""
+
+ # NOTE(berendt): virConnectCompareCPU not working for Xen
+ if CONF.libvirt_type == 'xen':
+ return 1
+
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
@@ -2360,7 +2425,7 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
- cpu.add_feature(config.LibvirtConfigCPUFeature(f))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
@@ -2373,11 +2438,11 @@ class LibvirtDriver(driver.ComputeDriver):
raise
if ret <= 0:
- LOG.error(reason=m % locals())
+ LOG.error(m % locals())
raise exception.InvalidCPUInfo(reason=m % locals())
def _create_shared_storage_test_file(self):
- """Makes tmpfile under CONF.instance_path."""
+ """Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
@@ -2461,10 +2526,12 @@ class LibvirtDriver(driver.ComputeDriver):
"""
greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
- post_method, recover_method, block_migration)
+ post_method, recover_method, block_migration,
+ migrate_data)
def _live_migration(self, ctxt, instance_ref, dest, post_method,
- recover_method, block_migration=False):
+ recover_method, block_migration=False,
+ migrate_data=None):
"""Do live migration.
:params ctxt: security context
@@ -2478,7 +2545,7 @@ class LibvirtDriver(driver.ComputeDriver):
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
-
+ :params migrate_data: implementation specific params
"""
# Do live migration.
@@ -2503,22 +2570,65 @@ class LibvirtDriver(driver.ComputeDriver):
recover_method(ctxt, instance_ref, dest, block_migration)
# Waiting for completion of live_migration.
- timer = utils.LoopingCall(f=None)
+ timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
- """waiting for live migration completion"""
+ """waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
timer.stop()
- post_method(ctxt, instance_ref, dest, block_migration)
+ post_method(ctxt, instance_ref, dest, block_migration,
+ migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
+ def _fetch_instance_kernel_ramdisk(self, context, instance):
+ """Download kernel and ramdisk for instance in instance directory."""
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ if instance['kernel_id']:
+ libvirt_utils.fetch_image(context,
+ os.path.join(instance_dir, 'kernel'),
+ instance['kernel_id'],
+ instance['user_id'],
+ instance['project_id'])
+ if instance['ramdisk_id']:
+ libvirt_utils.fetch_image(context,
+ os.path.join(instance_dir,
+ 'ramdisk'),
+ instance['ramdisk_id'],
+ instance['user_id'],
+ instance['project_id'])
+
def pre_live_migration(self, context, instance_ref, block_device_info,
- network_info):
+ network_info, migrate_data=None):
"""Preparation live migration."""
+ # Steps for volume backed instance live migration w/o shared storage.
+ is_shared_storage = True
+ is_volume_backed = False
+ is_block_migration = True
+ if migrate_data:
+ is_shared_storage = migrate_data.get('is_shared_storage', True)
+ is_volume_backed = migrate_data.get('is_volume_backed', False)
+ is_block_migration = migrate_data.get('block_migration', True)
+
+ if is_volume_backed and not (is_block_migration or is_shared_storage):
+
+ # Create the instance directory on destination compute node.
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
+ if os.path.exists(instance_dir):
+ raise exception.DestinationDiskExists(path=instance_dir)
+ os.mkdir(instance_dir)
+
+ # Touch the console.log file, required by libvirt.
+ console_file = self._get_console_log_path(instance_ref)
+ libvirt_utils.file_open(console_file, 'a').close()
+
+ # if image has kernel and ramdisk, just download
+ # following normal way.
+ self._fetch_instance_kernel_ramdisk(context, instance_ref)
+
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
@@ -2561,7 +2671,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
- instance_dir = os.path.join(CONF.instances_path, instance['name'])
+ instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
@@ -2580,7 +2690,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
- image = self.image_backend.image(instance['name'],
+ image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
@@ -2593,19 +2703,7 @@ class LibvirtDriver(driver.ComputeDriver):
# if image has kernel and ramdisk, just download
# following normal way.
- if instance['kernel_id']:
- libvirt_utils.fetch_image(ctxt,
- os.path.join(instance_dir, 'kernel'),
- instance['kernel_id'],
- instance['user_id'],
- instance['project_id'])
- if instance['ramdisk_id']:
- libvirt_utils.fetch_image(ctxt,
- os.path.join(instance_dir,
- 'ramdisk'),
- instance['ramdisk_id'],
- instance['user_id'],
- instance['project_id'])
+ self._fetch_instance_kernel_ramdisk(ctxt, instance)
def post_live_migration_at_destination(self, ctxt,
instance_ref,
@@ -2623,8 +2721,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = os.path.join(CONF.instances_path,
- instance_ref["name"])
+ instance_dir = libvirt_utils.get_instance_path(instance_ref)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
# libvirt.xml
@@ -2742,19 +2839,6 @@ class LibvirtDriver(driver.ComputeDriver):
If 'refresh' is True, run update the stats first."""
return self.host_state.get_host_stats(refresh=refresh)
- def host_power_action(self, host, action):
- """Reboots, shuts down or powers up the host."""
- raise NotImplementedError()
-
- def host_maintenance_mode(self, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation."""
- raise NotImplementedError()
-
- def set_host_enabled(self, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- pass
-
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
@@ -2767,7 +2851,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
- """Used only for cleanup in case migrate_disk_and_power_off fails"""
+ """Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
@@ -2776,7 +2860,6 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
pass
- @exception.wrap_exception()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
@@ -2800,7 +2883,7 @@ class LibvirtDriver(driver.ComputeDriver):
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
same_host = (dest == self.get_host_ip_addr())
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
@@ -2842,7 +2925,6 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
- @exception.wrap_exception()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
@@ -2891,16 +2973,16 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
- @exception.wrap_exception()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
- inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
+ inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
@@ -2909,17 +2991,17 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
- timer = utils.LoopingCall(self._wait_for_running, instance)
+ timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
+ instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
- """ get the list of io devices from the
- xml document."""
+ """get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
@@ -2999,14 +3081,22 @@ class LibvirtDriver(driver.ComputeDriver):
"""Remove a compute host from an aggregate."""
pass
- def undo_aggregate_operation(self, context, op, aggregate_id,
+ def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """only used for Resource Pools"""
+ """only used for Resource Pools."""
pass
+ def instance_on_disk(self, instance):
+ # ensure directories exist and are writable
+ instance_path = libvirt_utils.get_instance_path(instance)
+ LOG.debug(_('Checking instance files accessability'
+ '%(instance_path)s')
+ % locals())
+ return os.access(instance_path, os.W_OK)
+
class HostState(object):
- """Manages information about the compute node through libvirt"""
+ """Manages information about the compute node through libvirt."""
def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index ad14f21de..c47056ff2 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -20,13 +20,14 @@
from eventlet import tpool
-from nova import config
-from nova import flags
+from nova.cloudpipe import pipelib
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
try:
import libvirt
@@ -44,13 +45,14 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
spoofing, IP spoofing, and ARP spoofing.
"""
- def __init__(self, get_connection, **kwargs):
+ def __init__(self, virtapi, get_connection, **kwargs):
+ super(NWFilterFirewall, self).__init__(virtapi)
self._libvirt_get_connection = get_connection
self.static_filters_configured = False
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
@@ -98,7 +100,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
</filter>'''
def setup_basic_filtering(self, instance, network_info):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_('Called setup_basic_filtering in nwfilter'),
instance=instance)
@@ -115,7 +117,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@@ -203,7 +205,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
@@ -219,9 +221,9 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
- def __init__(self, execute=None, **kwargs):
- super(IptablesFirewallDriver, self).__init__(**kwargs)
- self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ def __init__(self, virtapi, execute=None, **kwargs):
+ super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
+ self.nwfilter = NWFilterFirewall(virtapi, kwargs['get_connection'])
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
@@ -233,7 +235,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.basicly_filtered = True
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
@@ -250,5 +252,5 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 7152e3e2d..d272e408c 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -19,8 +19,6 @@ import abc
import contextlib
import os
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
@@ -47,8 +45,9 @@ __imagebackend_opts = [
' if this flag is set to True.'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
+CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
class Image(object):
@@ -122,7 +121,7 @@ class Image(object):
fetch_func(target=target, *args, **kwargs)
if not os.path.exists(self.path):
- base_dir = os.path.join(CONF.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
@@ -143,8 +142,9 @@ class Raw(Image):
def __init__(self, instance=None, name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -171,22 +171,17 @@ class Qcow2(Image):
def __init__(self, instance=None, name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
- self.path = path or os.path.join(CONF.instances_path,
- instance, name)
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ name))
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
lock_path=self.lock_path)
def copy_qcow2_image(base, target, size):
- qcow2_base = base
+ libvirt_utils.create_cow_image(base, target)
if size:
- size_gb = size / (1024 * 1024 * 1024)
- qcow2_base += '_%d' % size_gb
- if not os.path.exists(qcow2_base):
- with utils.remove_path_on_error(qcow2_base):
- libvirt_utils.copy_image(base, qcow2_base)
- disk.extend(qcow2_base, size)
- libvirt_utils.create_cow_image(qcow2_base, target)
+ disk.extend(target, size)
prepare_template(target=base, *args, **kwargs)
with utils.remove_path_on_error(self.path):
@@ -215,7 +210,7 @@ class Lvm(Image):
' libvirt_images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
- self.lv = '%s_%s' % (self.escape(instance),
+ self.lv = '%s_%s' % (self.escape(instance['name']),
self.escape(name))
self.path = os.path.join('/dev', self.vg, self.lv)
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 961309929..50fac9bb4 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -23,15 +23,17 @@ http://wiki.openstack.org/nova-image-cache-management.
"""
import hashlib
+import json
import os
import re
import time
from nova.compute import task_states
from nova.compute import vm_states
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import fileutils
+from nova.openstack.common import jsonutils
+from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import utils as virtutils
@@ -40,6 +42,15 @@ from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
imagecache_opts = [
+ cfg.StrOpt('base_dir_name',
+ default='_base',
+ help="Where cached images are stored under $instances_path."
+ "This is NOT the full path - just a folder name."
+ "For per-compute-host cached images, set to _base_$my_ip"),
+ cfg.StrOpt('image_info_filename_pattern',
+ default='$instances_path/$base_dir_name/%(image)s.info',
+ help='Allows image information files to be stored in '
+ 'non-standard locations'),
cfg.BoolOpt('remove_unused_base_images',
default=True,
help='Should unused base images be removed?'),
@@ -54,35 +65,154 @@ imagecache_opts = [
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk'),
+ cfg.IntOpt('checksum_interval_seconds',
+ default=3600,
+ help='How frequently to checksum base images'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(imagecache_opts)
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('instances_path', 'nova.compute.manager')
-CONF.import_opt('base_dir_name', 'nova.compute.manager')
-def read_stored_checksum(target):
+def get_info_filename(base_path):
+ """Construct a filename for storing addtional information about a base
+ image.
+
+ Returns a filename.
+ """
+
+ base_file = os.path.basename(base_path)
+ return (CONF.image_info_filename_pattern
+ % {'image': base_file})
+
+
+def is_valid_info_file(path):
+ """Test if a given path matches the pattern for info files."""
+
+ digest_size = hashlib.sha1().digestsize * 2
+ regexp = (CONF.image_info_filename_pattern
+ % {'image': ('([0-9a-f]{%(digest_size)d}|'
+ '[0-9a-f]{%(digest_size)d}_sm|'
+ '[0-9a-f]{%(digest_size)d}_[0-9]+)'
+ % {'digest_size': digest_size})})
+ m = re.match(regexp, path)
+ if m:
+ return True
+ return False
+
+
+def _read_possible_json(serialized, info_file):
+ try:
+ d = jsonutils.loads(serialized)
+
+ except ValueError, e:
+ LOG.error(_('Error reading image info file %(filename)s: '
+ '%(error)s'),
+ {'filename': info_file,
+ 'error': e})
+ d = {}
+
+ return d
+
+
+def read_stored_info(target, field=None, timestamped=False):
+ """Read information about an image.
+
+ Returns an empty dictionary if there is no info, just the field value if
+ a field is requested, or the entire dictionary otherwise.
+ """
+
+ info_file = get_info_filename(target)
+ if not os.path.exists(info_file):
+ # NOTE(mikal): Special case to handle essex checksums being converted.
+ # There is an assumption here that target is a base image filename.
+ old_filename = target + '.sha1'
+ if field == 'sha1' and os.path.exists(old_filename):
+ hash_file = open(old_filename)
+ hash_value = hash_file.read()
+ hash_file.close()
+
+ write_stored_info(target, field=field, value=hash_value)
+ os.remove(old_filename)
+ d = {field: hash_value}
+
+ else:
+ d = {}
+
+ else:
+ lock_name = 'info-%s' % os.path.split(target)[-1]
+ lock_path = os.path.join(CONF.instances_path, 'locks')
+
+ @lockutils.synchronized(lock_name, 'nova-', external=True,
+ lock_path=lock_path)
+ def read_file(info_file):
+ LOG.debug(_('Reading image info file: %s'), info_file)
+ with open(info_file, 'r') as f:
+ return f.read().rstrip()
+
+ serialized = read_file(info_file)
+ d = _read_possible_json(serialized, info_file)
+
+ if field:
+ if timestamped:
+ return (d.get(field, None), d.get('%s-timestamp' % field, None))
+ else:
+ return d.get(field, None)
+ return d
+
+
+def write_stored_info(target, field=None, value=None):
+ """Write information about an image."""
+
+ if not field:
+ return
+
+ info_file = get_info_filename(target)
+ LOG.info(_('Writing stored info to %s'), info_file)
+ fileutils.ensure_tree(os.path.dirname(info_file))
+
+ lock_name = 'info-%s' % os.path.split(target)[-1]
+ lock_path = os.path.join(CONF.instances_path, 'locks')
+
+ @lockutils.synchronized(lock_name, 'nova-', external=True,
+ lock_path=lock_path)
+ def write_file(info_file, field, value):
+ d = {}
+
+ if os.path.exists(info_file):
+ with open(info_file, 'r') as f:
+ d = _read_possible_json(f.read(), info_file)
+
+ d[field] = value
+ d['%s-timestamp' % field] = time.time()
+
+ with open(info_file, 'w') as f:
+ f.write(json.dumps(d))
+
+ write_file(info_file, field, value)
+
+
+def read_stored_checksum(target, timestamped=True):
"""Read the checksum.
Returns the checksum (as hex) or None.
"""
- return virtutils.read_stored_info(target, field='sha1')
+ return read_stored_info(target, field='sha1', timestamped=timestamped)
def write_stored_checksum(target):
"""Write a checksum to disk for a file in _base."""
- if not read_stored_checksum(target):
- img_file = open(target, 'r')
+ with open(target, 'r') as img_file:
checksum = utils.hash_file(img_file)
- img_file.close()
-
- virtutils.write_stored_info(target, field='sha1', value=checksum)
+ write_stored_info(target, field='sha1', value=checksum)
class ImageCacheManager(object):
def __init__(self):
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
self._reset_state()
def _reset_state(self):
@@ -123,8 +253,7 @@ class ImageCacheManager(object):
elif (len(ent) > digest_size + 2 and
ent[digest_size] == '_' and
- not virtutils.is_valid_info_file(os.path.join(base_dir,
- ent))):
+ not is_valid_info_file(os.path.join(base_dir, ent))):
self._store_image(base_dir, ent, original=False)
def _list_running_instances(self, context, all_instances):
@@ -228,35 +357,61 @@ class ImageCacheManager(object):
if not CONF.checksum_base_images:
return None
- stored_checksum = read_stored_checksum(base_file)
- if stored_checksum:
- f = open(base_file, 'r')
- current_checksum = utils.hash_file(f)
- f.close()
-
- if current_checksum != stored_checksum:
- LOG.error(_('%(id)s (%(base_file)s): image verification '
- 'failed'),
- {'id': img_id,
- 'base_file': base_file})
- return False
+ lock_name = 'hash-%s' % os.path.split(base_file)[-1]
+
+ # Protect against other nova-computes performing checksums at the same
+ # time if we are using shared storage
+ @lockutils.synchronized(lock_name, 'nova-', external=True,
+ lock_path=self.lock_path)
+ def inner_verify_checksum():
+ (stored_checksum, stored_timestamp) = read_stored_checksum(
+ base_file, timestamped=True)
+ if stored_checksum:
+ # NOTE(mikal): Checksums are timestamped. If we have recently
+ # checksummed (possibly on another compute node if we are using
+ # shared storage), then we don't need to checksum again.
+ if (stored_timestamp and
+ time.time() - stored_timestamp <
+ CONF.checksum_interval_seconds):
+ return True
+
+ # NOTE(mikal): If there is no timestamp, then the checksum was
+ # performed by a previous version of the code.
+ if not stored_timestamp:
+ write_stored_info(base_file, field='sha1',
+ value=stored_checksum)
+
+ with open(base_file, 'r') as f:
+ current_checksum = utils.hash_file(f)
+
+ if current_checksum != stored_checksum:
+ LOG.error(_('image %(id)s at (%(base_file)s): image '
+ 'verification failed'),
+ {'id': img_id,
+ 'base_file': base_file})
+ return False
+
+ else:
+ return True
else:
- return True
+ LOG.info(_('image %(id)s at (%(base_file)s): image '
+ 'verification skipped, no hash stored'),
+ {'id': img_id,
+ 'base_file': base_file})
- else:
- LOG.info(_('%(id)s (%(base_file)s): image verification skipped, '
- 'no hash stored'),
- {'id': img_id,
- 'base_file': base_file})
+ # NOTE(mikal): If the checksum file is missing, then we should
+ # create one. We don't create checksums when we download images
+ # from glance because that would delay VM startup.
+ if CONF.checksum_base_images and create_if_missing:
+ LOG.info(_('%(id)s (%(base_file)s): generating checksum'),
+ {'id': img_id,
+ 'base_file': base_file})
+ write_stored_checksum(base_file)
- # NOTE(mikal): If the checksum file is missing, then we should
- # create one. We don't create checksums when we download images
- # from glance because that would delay VM startup.
- if create_if_missing:
- write_stored_checksum(base_file)
+ return None
- return None
+ return inner_verify_checksum()
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough.
@@ -282,7 +437,7 @@ class ImageCacheManager(object):
LOG.info(_('Removing base file: %s'), base_file)
try:
os.remove(base_file)
- signature = virtutils.get_info_filename(base_file)
+ signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
except OSError, e:
@@ -297,7 +452,7 @@ class ImageCacheManager(object):
image_bad = False
image_in_use = False
- LOG.info(_('%(id)s (%(base_file)s): checking'),
+ LOG.info(_('image %(id)s at (%(base_file)s): checking'),
{'id': img_id,
'base_file': base_file})
@@ -312,47 +467,48 @@ class ImageCacheManager(object):
if not checksum_result is None:
image_bad = not checksum_result
+ # Give other threads a chance to run
+ time.sleep(0)
+
instances = []
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]
- if local > 0:
- LOG.debug(_('%(id)s (%(base_file)s): '
- 'in use: on this node %(local)d local, '
- '%(remote)d on other nodes'),
- {'id': img_id,
- 'base_file': base_file,
- 'local': local,
- 'remote': remote})
+ if local > 0 or remote > 0:
image_in_use = True
+ LOG.info(_('image %(id)s at (%(base_file)s): '
+ 'in use: on this node %(local)d local, '
+ '%(remote)d on other nodes sharing this instance '
+ 'storage'),
+ {'id': img_id,
+ 'base_file': base_file,
+ 'local': local,
+ 'remote': remote})
+
self.active_base_files.append(base_file)
if not base_file:
- LOG.warning(_('%(id)s (%(base_file)s): warning -- an '
- 'absent base file is in use! instances: '
- '%(instance_list)s'),
+ LOG.warning(_('image %(id)s at (%(base_file)s): warning '
+ '-- an absent base file is in use! '
+ 'instances: %(instance_list)s'),
{'id': img_id,
'base_file': base_file,
'instance_list': ' '.join(instances)})
- else:
- LOG.debug(_('%(id)s (%(base_file)s): in use on (%(remote)d on '
- 'other nodes)'),
- {'id': img_id,
- 'base_file': base_file,
- 'remote': remote})
if image_bad:
self.corrupt_base_files.append(base_file)
if base_file:
if not image_in_use:
- LOG.debug(_('%(id)s (%(base_file)s): image is not in use'),
+ LOG.debug(_('image %(id)s at (%(base_file)s): image is not in '
+ 'use'),
{'id': img_id,
'base_file': base_file})
self.removable_base_files.append(base_file)
else:
- LOG.debug(_('%(id)s (%(base_file)s): image is in use'),
+ LOG.debug(_('image %(id)s at (%(base_file)s): image is in '
+ 'use'),
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
diff --git a/nova/virt/libvirt/snapshots.py b/nova/virt/libvirt/snapshots.py
index 37933876d..c85550eae 100644
--- a/nova/virt/libvirt/snapshots.py
+++ b/nova/virt/libvirt/snapshots.py
@@ -24,7 +24,7 @@ from nova.virt.libvirt import utils as libvirt_utils
class Snapshot(object):
@abc.abstractmethod
def create(self):
- """Create new snapshot"""
+ """Create new snapshot."""
pass
@abc.abstractmethod
@@ -38,7 +38,7 @@ class Snapshot(object):
@abc.abstractmethod
def delete(self):
- """Delete snapshot"""
+ """Delete snapshot."""
pass
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 53b7a7571..4b3517da7 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -19,38 +19,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import errno
-import hashlib
import os
-import re
from lxml import etree
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
-
-LOG = logging.getLogger(__name__)
-
-
-util_opts = [
- cfg.StrOpt('image_info_filename_pattern',
- default='$instances_path/$base_dir_name/%(image)s.info',
- help='Allows image information files to be stored in '
- 'non-standard locations')
- ]
-
-CONF = config.CONF
-CONF.register_opts(util_opts)
+CONF = cfg.CONF
CONF.import_opt('instances_path', 'nova.compute.manager')
-CONF.import_opt('base_dir_name', 'nova.compute.manager')
+LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
@@ -58,7 +39,7 @@ def execute(*args, **kwargs):
def get_iscsi_initiator():
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
@@ -128,7 +109,7 @@ def create_lvm_image(vg, lv, size, sparse=False):
"""
free_space = volume_group_free_space(vg)
- def check_size(size):
+ def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
@@ -137,7 +118,7 @@ def create_lvm_image(vg, lv, size, sparse=False):
if sparse:
preallocated_space = 64 * 1024 * 1024
- check_size(preallocated_space)
+ check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
@@ -148,7 +129,7 @@ def create_lvm_image(vg, lv, size, sparse=False):
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
- check_size(size)
+ check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
execute(*cmd, run_as_root=True, attempts=3)
@@ -164,12 +145,42 @@ def volume_group_free_space(vg):
return int(out.strip())
+def volume_group_total_space(vg):
+ """Return total space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--units', 'b', '-o', 'vg_size', vg,
+ run_as_root=True)
+ return int(out.strip())
+
+
+def volume_group_used_space(vg):
+    """Return used space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--separator', '|',
+ '--units', 'b', '-o', 'vg_size,vg_free', vg,
+ run_as_root=True)
+
+ info = out.split('|')
+ if len(info) != 2:
+ raise RuntimeError(_("vg %s must be LVM volume group") % vg)
+
+ return int(info[0]) - int(info[1])
+
+
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
:param vg: volume group name
"""
- out, err = execute('lvs', '--noheadings', '-o', 'lv_path', vg,
+ out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
@@ -191,8 +202,55 @@ def logical_volume_info(path):
return dict(zip(*info))
+def logical_volume_size(path):
+ """Get logical volume size in bytes.
+
+ :param path: logical volume path
+ """
+    # TODO(p-draigbrady): Possibly replace with the more general
+ # use of blockdev --getsize64 in future
+ out, _err = execute('lvs', '-o', 'lv_size', '--noheadings', '--units',
+ 'b', '--nosuffix', path, run_as_root=True)
+
+ return int(out)
+
+
+def clear_logical_volume(path):
+ """Obfuscate the logical volume.
+
+ :param path: logical volume path
+ """
+ # TODO(p-draigbrady): We currently overwrite with zeros
+ # but we may want to make this configurable in future
+ # for more or less security conscious setups.
+
+ vol_size = logical_volume_size(path)
+ bs = 1024 * 1024
+ direct_flags = ('oflag=direct',)
+ remaining_bytes = vol_size
+
+ # The loop caters for versions of dd that
+ # don't support the iflag=count_bytes option.
+ while remaining_bytes:
+ zero_blocks = remaining_bytes / bs
+ seek_blocks = (vol_size - remaining_bytes) / bs
+ zero_cmd = ('dd', 'bs=%s' % bs,
+ 'if=/dev/zero', 'of=%s' % path,
+ 'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
+ zero_cmd += direct_flags
+ if zero_blocks:
+ utils.execute(*zero_cmd, run_as_root=True)
+ remaining_bytes %= bs
+ bs /= 1024 # Limit to 3 iterations
+ direct_flags = () # Only use O_DIRECT with initial block size
+
+
def remove_logical_volumes(*paths):
"""Remove one or more logical volume."""
+
+ for path in paths:
+ clear_logical_volume(path)
+
if paths:
lvremove = ('lvremove', '-f') + paths
execute(*lvremove, attempts=3, run_as_root=True)
@@ -215,7 +273,7 @@ def pick_disk_driver_name(is_block_dev=False):
if is_block_dev:
return "phy"
else:
- return "tap"
+ return "file"
elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
@@ -412,11 +470,11 @@ def find_disk(virt_dom):
def get_disk_type(path):
- """Retrieve disk type (raw, qcow2, lvm) for given file"""
+ """Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
- return images.qemu_img_info(path)['file format']
+ return images.qemu_img_info(path).file_format
def get_fs_info(path):
@@ -439,97 +497,21 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
- """Grab image"""
+ """Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
-def get_info_filename(base_path):
- """Construct a filename for storing addtional information about a base
- image.
-
- Returns a filename.
- """
-
- base_file = os.path.basename(base_path)
- return (CONF.image_info_filename_pattern
- % {'image': base_file})
-
-
-def is_valid_info_file(path):
- """Test if a given path matches the pattern for info files."""
+def get_instance_path(instance):
+ """Determine the correct path for instance storage.
- digest_size = hashlib.sha1().digestsize * 2
- regexp = (CONF.image_info_filename_pattern
- % {'image': ('([0-9a-f]{%(digest_size)d}|'
- '[0-9a-f]{%(digest_size)d}_sm|'
- '[0-9a-f]{%(digest_size)d}_[0-9]+)'
- % {'digest_size': digest_size})})
- m = re.match(regexp, path)
- if m:
- return True
- return False
+ This used to be calculated all over the place. This method centralizes
+ this into one location, which will make it easier to change the
+ algorithm used to name instance storage directories.
+ :param instance: the instance we want a path for
-def read_stored_info(base_path, field=None):
- """Read information about an image.
-
- Returns an empty dictionary if there is no info, just the field value if
- a field is requested, or the entire dictionary otherwise.
+ :returns: a path to store information about that instance
"""
-
- info_file = get_info_filename(base_path)
- if not os.path.exists(info_file):
- # Special case to handle essex checksums being converted
- old_filename = base_path + '.sha1'
- if field == 'sha1' and os.path.exists(old_filename):
- hash_file = open(old_filename)
- hash_value = hash_file.read()
- hash_file.close()
-
- write_stored_info(base_path, field=field, value=hash_value)
- os.remove(old_filename)
- d = {field: hash_value}
-
- else:
- d = {}
-
- else:
- LOG.info(_('Reading image info file: %s'), info_file)
- f = open(info_file, 'r')
- serialized = f.read().rstrip()
- f.close()
- LOG.info(_('Read: %s'), serialized)
-
- try:
- d = jsonutils.loads(serialized)
-
- except ValueError, e:
- LOG.error(_('Error reading image info file %(filename)s: '
- '%(error)s'),
- {'filename': info_file,
- 'error': e})
- d = {}
-
- if field:
- return d.get(field, None)
- return d
-
-
-def write_stored_info(target, field=None, value=None):
- """Write information about an image."""
-
- if not field:
- return
-
- info_file = get_info_filename(target)
- fileutils.ensure_tree(os.path.dirname(info_file))
-
- d = read_stored_info(info_file)
- d[field] = value
- serialized = jsonutils.dumps(d)
-
- LOG.info(_('Writing image info file: %s'), info_file)
- LOG.info(_('Wrote: %s'), serialized)
- f = open(info_file, 'w')
- f.write(serialized)
- f.close()
+ # TODO(mikal): we should use UUID instead of name, as name isn't
+    # necessarily unique
+ return os.path.join(CONF.instances_path, instance['name'])
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 56c26dfd1..54de9da2d 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -19,70 +19,87 @@
"""VIF drivers for libvirt."""
-from nova import config
from nova import exception
-from nova import flags
from nova.network import linux_net
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-from nova.virt import netutils
-from nova.virt import vif
from nova.virt.libvirt import config as vconfig
-
+from nova.virt.libvirt import designer
+from nova.virt import netutils
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
- cfg.StrOpt('libvirt_ovs_bridge',
- default='br-int',
- help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
- default=False,
- help='Use virtio for bridge interfaces'),
+ default=True,
+ help='Use virtio for bridge interfaces with KVM/QEMU'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
-LINUX_DEV_LEN = 14
+class LibvirtBaseVIFDriver(object):
+ def get_vif_devname(self, mapping):
+ if 'vif_devname' in mapping:
+ return mapping['vif_devname']
+ return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
-class LibvirtBridgeDriver(vif.VIFDriver):
+ def get_config(self, instance, network, mapping):
+ conf = vconfig.LibvirtConfigGuestInterface()
+ model = None
+ driver = None
+ if (CONF.libvirt_type in ('kvm', 'qemu') and
+ CONF.libvirt_use_virtio_for_bridges):
+ model = "virtio"
+ # Workaround libvirt bug, where it mistakenly
+ # enables vhost mode, even for non-KVM guests
+ if CONF.libvirt_type == "qemu":
+ driver = "qemu"
+
+ designer.set_vif_guest_frontend_config(
+ conf, mapping['mac'], model, driver)
+
+ return conf
+
+
+class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux bridge."""
- def _get_configurations(self, instance, network, mapping):
+ def get_config(self, instance, network, mapping):
"""Get VIF configurations for bridge type."""
mac_id = mapping['mac'].replace(':', '')
- conf = vconfig.LibvirtConfigGuestInterface()
- conf.net_type = "bridge"
- conf.mac_addr = mapping['mac']
- conf.source_dev = network['bridge']
- conf.script = ""
- if CONF.libvirt_use_virtio_for_bridges:
- conf.model = "virtio"
-
- conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
- conf.add_filter_param("IP", mapping['ips'][0]['ip'])
- if mapping['dhcp_server']:
- conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
+ conf = super(LibvirtBridgeDriver,
+ self).get_config(instance,
+ network,
+ mapping)
- if CONF.use_ipv6:
- conf.add_filter_param("RASERVER",
- mapping.get('gateway_v6') + "/128")
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
+
+ name = "nova-instance-" + instance['name'] + "-" + mac_id
+ primary_addr = mapping['ips'][0]['ip']
+ dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
+ if mapping['dhcp_server']:
+ dhcp_server = mapping['dhcp_server']
+ if CONF.use_ipv6:
+ ra_server = mapping.get('gateway_v6') + "/128"
if CONF.allow_same_net_traffic:
- net, mask = netutils.get_net_and_mask(network['cidr'])
- conf.add_filter_param("PROJNET", net)
- conf.add_filter_param("PROJMASK", mask)
+ ipv4_cidr = network['cidr']
if CONF.use_ipv6:
- net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
- network['cidr_v6'])
- conf.add_filter_param("PROJNET6", net_v6)
- conf.add_filter_param("PROJMASK6", prefixlen_v6)
+ ipv6_cidr = network['cidr_v6']
+
+ designer.set_vif_host_backend_filter_config(
+ conf, name, primary_addr, dhcp_server,
+ ra_server, ipv4_cidr, ipv6_cidr)
return conf
@@ -109,43 +126,50 @@ class LibvirtBridgeDriver(vif.VIFDriver):
network['bridge'],
iface)
- return self._get_configurations(instance, network, mapping)
-
def unplug(self, instance, vif):
"""No manual unplugging required."""
pass
-class LibvirtOpenVswitchDriver(vif.VIFDriver):
+class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
"""VIF driver for Open vSwitch that uses libivrt type='ethernet'
Used for libvirt versions that do not support
OVS virtual port XML (0.9.10 or earlier).
"""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
+ def get_config(self, instance, network, mapping):
+ dev = self.get_vif_devname(mapping)
- def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
+ conf = super(LibvirtOpenVswitchDriver,
+ self).get_config(instance,
+ network,
+ mapping)
+
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
+
+ return conf
+
+ def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
- CONF.libvirt_ovs_bridge, dev,
- '--', 'set', 'Interface', dev,
- 'external-ids:iface-id=%s' % iface_id,
- 'external-ids:iface-status=active',
- 'external-ids:attached-mac=%s' % mac,
- 'external-ids:vm-uuid=%s' % instance_id,
- run_as_root=True)
-
- def delete_ovs_vif_port(self, dev):
- utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge,
- dev, run_as_root=True)
+ bridge, dev,
+ '--', 'set', 'Interface', dev,
+ 'external-ids:iface-id=%s' % iface_id,
+ 'external-ids:iface-status=active',
+ 'external-ids:attached-mac=%s' % mac,
+ 'external-ids:vm-uuid=%s' % instance_id,
+ run_as_root=True)
+
+ def delete_ovs_vif_port(self, bridge, dev):
+ utils.execute('ovs-vsctl', 'del-port', bridge, dev,
+ run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
- if not linux_net._device_exists(dev):
+ dev = self.get_vif_devname(mapping)
+ if not linux_net.device_exists(dev):
# Older version of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
# turns out we're on an old version we work around this by using
@@ -159,25 +183,16 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
- self.create_ovs_vif_port(dev, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ dev, iface_id, mapping['mac'],
instance['uuid'])
- conf = vconfig.LibvirtConfigGuestInterface()
-
- if CONF.libvirt_use_virtio_for_bridges:
- conf.model = "virtio"
- conf.net_type = "ethernet"
- conf.target_dev = dev
- conf.script = ""
- conf.mac_addr = mapping['mac']
-
- return conf
-
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
- self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid']))
+ self.delete_ovs_vif_port(network['bridge'],
+ self.get_vif_devname(mapping))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -193,11 +208,19 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
"""
def get_br_name(self, iface_id):
- return ("qbr" + iface_id)[:LINUX_DEV_LEN]
+ return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
- return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
- ("qvo%s" % iface_id)[:LINUX_DEV_LEN])
+ return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
+ ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
+
+ def get_config(self, instance, network, mapping):
+ br_name = self.get_br_name(mapping['vif_uuid'])
+ network['bridge'] = br_name
+ return super(LibvirtHybridOVSBridgeDriver,
+ self).get_config(instance,
+ network,
+ mapping)
def plug(self, instance, vif):
"""Plug using hybrid strategy
@@ -213,19 +236,17 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
br_name = self.get_br_name(iface_id)
v1_name, v2_name = self.get_veth_pair_names(iface_id)
- if not linux_net._device_exists(br_name):
+ if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
- if not linux_net._device_exists(v2_name):
+ if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
- self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'],
+ self.create_ovs_vif_port(network['bridge'],
+ v2_name, iface_id, mapping['mac'],
instance['uuid'])
- network['bridge'] = br_name
- return self._get_configurations(instance, network, mapping)
-
def unplug(self, instance, vif):
"""UnPlug using hybrid strategy
@@ -243,67 +264,57 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
- self.delete_ovs_vif_port(v2_name)
+ self.delete_ovs_vif_port(network['bridge'], v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
-class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
+class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
"""VIF driver for Open vSwitch that uses integrated libvirt
OVS virtual port XML (introduced in libvirt 0.9.11)."""
- def plug(self, instance, vif):
- """ Pass data required to create OVS virtual port element"""
- network, mapping = vif
+ def get_config(self, instance, network, mapping):
+ """Pass data required to create OVS virtual port element."""
+ conf = super(LibvirtOpenVswitchVirtualPortDriver,
+ self).get_config(instance,
+ network,
+ mapping)
- conf = vconfig.LibvirtConfigGuestInterface()
-
- conf.net_type = "bridge"
- conf.source_dev = CONF.libvirt_ovs_bridge
- conf.mac_addr = mapping['mac']
- if CONF.libvirt_use_virtio_for_bridges:
- conf.model = "virtio"
- conf.vporttype = "openvswitch"
- conf.add_vport_param("interfaceid", mapping['vif_uuid'])
+ designer.set_vif_host_backend_ovs_config(
+ conf, network['bridge'], mapping['vif_uuid'],
+ self.get_vif_devname(mapping))
return conf
+ def plug(self, instance, vif):
+ pass
+
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
-class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
+class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
+ def get_config(self, instance, network, mapping):
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(network['bridge'],
+ None,
+ filtering=False)
- def plug(self, instance, vif):
- network, mapping = vif
- iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
-
- if CONF.libvirt_type != 'xen':
- linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev)
-
- conf = vconfig.LibvirtConfigGuestInterface()
+ conf = super(QuantumLinuxBridgeVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping)
- if CONF.libvirt_use_virtio_for_bridges:
- conf.model = 'virtio'
- conf.net_type = "ethernet"
- conf.target_dev = dev
- conf.script = ""
- conf.mac_addr = mapping['mac']
+ designer.set_vif_host_backend_bridge_config(
+ conf, network['bridge'], self.get_vif_devname(mapping))
return conf
+ def plug(self, instance, vif):
+ pass
+
def unplug(self, instance, vif):
- """Unplug the VIF by deleting the port from the bridge."""
- network, mapping = vif
- dev = self.get_dev_name(mapping['vif_uuid'])
- try:
- utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
- except exception.ProcessExecutionError:
- LOG.warning(_("Failed while unplugging vif"), instance=instance)
- raise
+ """No action needed. Libvirt takes care of cleanup."""
+ pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 03c335fa0..f9a948fb5 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -20,9 +20,8 @@
import os
import time
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
@@ -30,8 +29,22 @@ from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
-CONF = config.CONF
-CONF.import_opt('num_iscsi_scan_tries', 'nova.volume.driver')
+
+volume_opts = [
+ cfg.IntOpt('num_iscsi_scan_tries',
+ default=3,
+ help='number of times to rescan iSCSI target to find volume'),
+ cfg.StrOpt('rbd_user',
+ default=None,
+ help='the RADOS client name for accessing rbd volumes'),
+ cfg.StrOpt('rbd_secret_uuid',
+ default=None,
+               help='the libvirt uuid of the secret for the rbd_user '
+ 'volumes')
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
class LibvirtVolumeDriver(object):
@@ -53,7 +66,7 @@ class LibvirtVolumeDriver(object):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
@@ -127,7 +140,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, mount_device):
- """Attach the volume to instance_name"""
+ """Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
@@ -197,7 +210,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
- """Detach the volume from instance_name"""
+ """Detach the volume from instance_name."""
sup = super(LibvirtISCSIVolumeDriver, self)
sup.disconnect_volume(connection_info, mount_device)
iscsi_properties = connection_info['data']
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index 5dec74e3d..b5083937d 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -20,11 +20,10 @@
import ctypes
import os
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
from nova.virt.libvirt import volume
@@ -32,18 +31,18 @@ LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('nfs_mount_point_base',
- default='$state_path/mnt',
+ default=paths.state_path_def('mnt'),
help='Base dir where nfs expected to be mounted on compute'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(volume_opts)
class NfsVolumeDriver(volume.LibvirtVolumeDriver):
- """ Class implements libvirt part of volume driver for NFS
- """
+ """Class implements libvirt part of volume driver for NFS."""
+
def __init__(self, *args, **kwargs):
- """Create back-end to nfs and check connection"""
+ """Create back-end to nfs and check connection."""
super(NfsVolumeDriver, self).__init__(*args, **kwargs)
def connect_volume(self, connection_info, mount_device):
@@ -57,7 +56,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
def _ensure_mounted(self, nfs_export):
@@ -70,7 +69,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return mount_path
def _mount_nfs(self, mount_path, nfs_share, ensure=False):
- """Mount nfs export to mount path"""
+ """Mount nfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
@@ -85,12 +84,12 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@staticmethod
def get_hash_str(base_str):
- """returns string that represents hash of base_str (in a hex format)"""
+ """returns string that represents hash of base_str (in hex format)."""
return str(ctypes.c_uint64(hash(base_str)).value)
@staticmethod
def _path_exists(path):
- """Check path """
+ """Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 484cb3db3..70f1544c4 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -18,15 +18,15 @@
# under the License.
-"""Network-releated utilities for supporting libvirt connection code."""
+"""Network-related utilities for supporting libvirt connection code."""
import netaddr
-from nova import config
-from nova import flags
+from nova.openstack.common import cfg
-CONF = config.CONF
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
Template = None
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
new file mode 100644
index 000000000..fb3a0210c
--- /dev/null
+++ b/nova/virt/powervm/blockdev.py
@@ -0,0 +1,425 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import os
+import re
+
+from eventlet import greenthread
+
+from nova import utils
+
+from nova.image import glance
+
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+
+from nova.virt import images
+from nova.virt.powervm import command
+from nova.virt.powervm import common
+from nova.virt.powervm import constants
+from nova.virt.powervm import exception
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class PowerVMDiskAdapter(object):
+ pass
+
+
+class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
+    """Default block device provider for PowerVM
+
+ This disk adapter uses logical volumes on the hosting VIOS
+ to provide backing block devices for instances/LPARs
+ """
+
+ def __init__(self, connection):
+ super(PowerVMLocalVolumeAdapter, self).__init__()
+
+ self.command = command.IVMCommand()
+
+ self._connection = None
+ self.connection_data = connection
+
+ def _set_connection(self):
+ if self._connection is None:
+ self._connection = common.ssh_connect(self.connection_data)
+
+ def create_volume(self, size):
+ """Creates a logical volume with a minimum size
+
+ :param size: size of the logical volume in bytes
+ :returns: string -- the name of the new logical volume.
+ :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ """
+ return self._create_logical_volume(size)
+
+ def delete_volume(self, disk_name):
+ """Removes the Logical Volume and its associated vSCSI connection
+
+ :param disk_name: name of Logical Volume device in /dev/
+ """
+ LOG.debug(_("Removing the logical volume '%s'") % disk_name)
+ self._remove_logical_volume(disk_name)
+
+ def create_volume_from_image(self, context, instance, image_id):
+ """Creates a Logical Volume and copies the specified image to it
+
+ :param context: nova context used to retrieve image from glance
+ :param instance: instance to create the volume for
+ :param image_id: image_id reference used to locate image in glance
+ :returns: dictionary with the name of the created
+ Logical Volume device in 'device_name' key
+ """
+
+ file_name = '.'.join([image_id, 'gz'])
+ file_path = os.path.join(CONF.powervm_img_local_path,
+ file_name)
+
+ if not os.path.isfile(file_path):
+ LOG.debug(_("Fetching image '%s' from glance") % image_id)
+ images.fetch_to_raw(context, image_id, file_path,
+ instance['user_id'],
+ project_id=instance['project_id'])
+ else:
+ LOG.debug((_("Using image found at '%s'") % file_path))
+
+ LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
+ remote_path = CONF.powervm_img_remote_path
+ remote_file_name, size = self._copy_image_file(file_path, remote_path)
+
+ # calculate root device size in bytes
+ # we respect the minimum root device size in constants
+ size_gb = max(instance['instance_type']['root_gb'],
+ constants.POWERVM_MIN_ROOT_GB)
+ size = size_gb * 1024 * 1024 * 1024
+
+ try:
+ LOG.debug(_("Creating logical volume of size %s bytes") % size)
+ disk_name = self._create_logical_volume(size)
+
+ LOG.debug(_("Copying image to the device '%s'") % disk_name)
+ self._copy_file_to_device(remote_file_name, disk_name)
+ except Exception:
+ LOG.error(_("Error while creating logical volume from image. "
+ "Will attempt cleanup."))
+ # attempt cleanup of logical volume before re-raising exception
+ with excutils.save_and_reraise_exception():
+ try:
+ self.delete_volume(disk_name)
+ except Exception:
+ msg = _('Error while attempting cleanup of failed '
+ 'deploy to logical volume.')
+ LOG.exception(msg)
+
+ return {'device_name': disk_name}
+
+ def create_image_from_volume(self, device_name, context,
+ image_id, image_meta):
+ """Capture the contents of a volume and upload to glance
+
+ :param device_name: device in /dev/ to capture
+ :param context: nova context for operation
+ :param image_id: image reference to pre-created image in glance
+ :param image_meta: metadata for new image
+ """
+
+ # do the disk copy
+ dest_file_path = common.aix_path_join(CONF.powervm_img_remote_path,
+ image_id)
+ self._copy_device_to_file(device_name, dest_file_path)
+
+ # compress and copy the file back to the nova-compute host
+ snapshot_file_path = self._copy_image_file_from_host(
+ dest_file_path, CONF.powervm_img_local_path,
+ compress=True)
+
+ # get glance service
+ glance_service, image_id = glance.get_remote_image_service(
+ context, image_id)
+
+ # upload snapshot file to glance
+ with open(snapshot_file_path, 'r') as img_file:
+ glance_service.update(context,
+ image_id,
+ image_meta,
+ img_file)
+ LOG.debug(_("Snapshot added to glance."))
+
+ # clean up local image file
+ try:
+ os.remove(snapshot_file_path)
+ except OSError as ose:
+ LOG.warn(_("Failed to clean up snapshot file "
+ "%(snapshot_file_path)s") % locals())
+
+ def migrate_volume(self):
+ raise NotImplementedError()
+
+ def attach_volume_to_host(self, *args, **kargs):
+ pass
+
+ def detach_volume_from_host(self, *args, **kargs):
+ pass
+
+ def _create_logical_volume(self, size):
+ """Creates a logical volume with a minimum size.
+
+ :param size: size of the logical volume in bytes
+ :returns: string -- the name of the new logical volume.
+ :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ """
+ vgs = self.run_vios_command(self.command.lsvg())
+ cmd = self.command.lsvg('%s -field vgname freepps -fmt :' %
+ ' '.join(vgs))
+ output = self.run_vios_command(cmd)
+ found_vg = None
+
+ # If it's not a multiple of 1MB we get the next
+ # multiple and use it as the megabyte_size.
+ megabyte = 1024 * 1024
+ if (size % megabyte) != 0:
+ megabyte_size = int(size / megabyte) + 1
+ else:
+ megabyte_size = size / megabyte
+
+ # Search for a volume group with enough free space for
+ # the new logical volume.
+ for vg in output:
+ # Returned output example: 'rootvg:396 (25344 megabytes)'
+ match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
+ if match is None:
+ continue
+ vg_name, avail_size = match.groups()
+ if megabyte_size <= int(avail_size):
+ found_vg = vg_name
+ break
+
+ if not found_vg:
+ LOG.error(_('Could not create logical volume. '
+ 'No space left on any volume group.'))
+ raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
+
+ cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
+ lv_name = self.run_vios_command(cmd)[0]
+ return lv_name
+
+ def _remove_logical_volume(self, lv_name):
+ """Removes the lv and the connection between its associated vscsi.
+
+ :param lv_name: a logical volume name
+ """
+ cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
+ self.run_vios_command(cmd)
+
+ def _copy_file_to_device(self, source_path, device, decompress=True):
+ """Copy file to device.
+
+ :param source_path: path to input source file
+ :param device: output device name
+ :param decompress: if True (default) the file will be decompressed
+ on the fly while being copied to the drive
+ """
+ if decompress:
+ cmd = ('gunzip -c %s | dd of=/dev/%s bs=1024k' %
+ (source_path, device))
+ else:
+ cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
+ self.run_vios_command_as_root(cmd)
+
+ def _copy_device_to_file(self, device_name, file_path):
+ """Copy a device to a file using dd
+
+ :param device_name: device name to copy from
+ :param file_path: output file path
+ """
+ cmd = 'dd if=/dev/%s of=%s bs=1024k' % (device_name, file_path)
+ self.run_vios_command_as_root(cmd)
+
+ def _md5sum_remote_file(self, remote_path):
+ # AIX6/VIOS cannot md5sum files with sizes greater than ~2GB
+ cmd = ("perl -MDigest::MD5 -e 'my $file = \"%s\"; open(FILE, $file); "
+ "binmode(FILE); "
+ "print Digest::MD5->new->addfile(*FILE)->hexdigest, "
+ "\" $file\n\";'" % remote_path)
+
+ output = self.run_vios_command_as_root(cmd)
+ return output[0]
+
+ def _copy_image_file(self, source_path, remote_path, decompress=False):
+ """Copy file to VIOS, decompress it, and return its new size and name.
+
+ :param source_path: source file path
+        :param remote_path: remote file path
+        :param decompress: if True, decompresses the file after copying;
+ if False (default), just copies the file
+ """
+ # Calculate source image checksum
+ hasher = hashlib.md5()
+ block_size = 0x10000
+ img_file = file(source_path, 'r')
+ buf = img_file.read(block_size)
+ while len(buf) > 0:
+ hasher.update(buf)
+ buf = img_file.read(block_size)
+ source_cksum = hasher.hexdigest()
+
+ comp_path = os.path.join(remote_path, os.path.basename(source_path))
+ uncomp_path = comp_path.rstrip(".gz")
+ if not decompress:
+ final_path = comp_path
+ else:
+ final_path = uncomp_path
+
+ # Check whether the image is already on IVM
+ output = self.run_vios_command("ls %s" % final_path,
+ check_exit_code=False)
+
+ # If the image does not exist already
+ if not output:
+ # Copy file to IVM
+ common.ftp_put_command(self.connection_data, source_path,
+ remote_path)
+
+ # Verify image file checksums match
+ output = self._md5sum_remote_file(final_path)
+ if not output:
+ LOG.error(_("Unable to get checksum"))
+ raise exception.PowerVMFileTransferFailed()
+ if source_cksum != output.split(' ')[0]:
+ LOG.error(_("Image checksums do not match"))
+ raise exception.PowerVMFileTransferFailed()
+
+ if decompress:
+ # Unzip the image
+ cmd = "/usr/bin/gunzip %s" % comp_path
+ output = self.run_vios_command_as_root(cmd)
+
+ # Remove existing image file
+ cmd = "/usr/bin/rm -f %s.*" % uncomp_path
+ output = self.run_vios_command_as_root(cmd)
+
+ # Rename unzipped image
+ cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
+ output = self.run_vios_command_as_root(cmd)
+
+ # Remove compressed image file
+ cmd = "/usr/bin/rm -f %s" % comp_path
+ output = self.run_vios_command_as_root(cmd)
+
+ else:
+ LOG.debug(_("Image found on host at '%s'") % final_path)
+
+ # Calculate file size in multiples of 512 bytes
+ output = self.run_vios_command("ls -o %s|awk '{print $4}'" %
+ final_path, check_exit_code=False)
+ if output:
+ size = int(output[0])
+ else:
+ LOG.error(_("Uncompressed image file not found"))
+ raise exception.PowerVMFileTransferFailed()
+ if (size % 512 != 0):
+ size = (int(size / 512) + 1) * 512
+
+ return final_path, size
+
+ def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
+ compress=False):
+ """
+ Copy a file from IVM to the nova-compute host,
+ and return the location of the copy
+
+        :param remote_source_path: remote source file path
+        :param local_dest_dir: local destination directory
+ :param compress: if True, compress the file before transfer;
+ if False (default), copy the file as is
+ """
+
+ temp_str = common.aix_path_join(local_dest_dir,
+ os.path.basename(remote_source_path))
+ local_file_path = temp_str + '.gz'
+
+ if compress:
+ copy_from_path = remote_source_path + '.gz'
+ else:
+ copy_from_path = remote_source_path
+
+ if compress:
+ # Gzip the file
+ cmd = "/usr/bin/gzip %s" % remote_source_path
+ self.run_vios_command_as_root(cmd)
+
+ # Cleanup uncompressed remote file
+ cmd = "/usr/bin/rm -f %s" % remote_source_path
+ self.run_vios_command_as_root(cmd)
+
+ # Get file checksum
+ output = self._md5sum_remote_file(copy_from_path)
+ if not output:
+ LOG.error(_("Unable to get checksum"))
+ msg_args = {'file_path': copy_from_path}
+ raise exception.PowerVMFileTransferFailed(**msg_args)
+ else:
+ source_chksum = output.split(' ')[0]
+
+ # Copy file to host
+ common.ftp_get_command(self.connection_data,
+ copy_from_path,
+ local_file_path)
+
+ # Calculate copied image checksum
+ with open(local_file_path, 'r') as image_file:
+ hasher = hashlib.md5()
+ block_size = 0x10000
+ buf = image_file.read(block_size)
+ while len(buf) > 0:
+ hasher.update(buf)
+ buf = image_file.read(block_size)
+ dest_chksum = hasher.hexdigest()
+
+ # do comparison
+ if source_chksum and dest_chksum != source_chksum:
+ LOG.error(_("Image checksums do not match"))
+ raise exception.PowerVMFileTransferFailed()
+
+ # Cleanup transferred remote file
+ cmd = "/usr/bin/rm -f %s" % copy_from_path
+ output = self.run_vios_command_as_root(cmd)
+
+ return local_file_path
+
+ def run_vios_command(self, cmd, check_exit_code=True):
+ """Run a remote command using an active ssh connection.
+
+        :param cmd: String with the command to run.
+ """
+ self._set_connection()
+ stdout, stderr = utils.ssh_execute(self._connection, cmd,
+ check_exit_code=check_exit_code)
+ return stdout.strip().splitlines()
+
+ def run_vios_command_as_root(self, command, check_exit_code=True):
+ """Run a remote command as root using an active ssh connection.
+
+        :param command: String with the command to run as root.
+ """
+ self._set_connection()
+ stdout, stderr = common.ssh_command_as_root(
+ self._connection, command, check_exit_code=check_exit_code)
+ return stdout.read().splitlines()
diff --git a/nova/virt/powervm/common.py b/nova/virt/powervm/common.py
index 179bd7f14..bf69be84e 100644
--- a/nova/virt/powervm/common.py
+++ b/nova/virt/powervm/common.py
@@ -63,6 +63,7 @@ def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
:returns: Tuple -- a tuple of (stdout, stderr)
:raises: nova.exception.ProcessExecutionError
"""
+ LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
chan = ssh_connection._transport.open_session()
# This command is required to be executed
# in order to become root.
@@ -108,5 +109,48 @@ def ftp_put_command(connection, local_path, remote_dir):
f.close()
ftp.close()
except Exception:
- LOG.exception(_('File transfer to PowerVM manager failed'))
- raise exception.PowerVMFileTransferFailed(file_path=local_path)
+ LOG.error(_('File transfer to PowerVM manager failed'))
+ raise exception.PowerVMFTPTransferFailed(ftp_cmd='PUT',
+ source_path=local_path, dest_path=remote_dir)
+
+
+def ftp_get_command(connection, remote_path, local_path):
+ """Retrieve a file via FTP
+
+ :param connection: a Connection object.
+ :param remote_path: path to the remote file
+ :param local_path: path to local destination
+    :raises: PowerVMFTPTransferFailed
+ """
+ try:
+ ftp = ftplib.FTP(host=connection.host,
+ user=connection.username,
+ passwd=connection.password)
+ ftp.cwd(os.path.dirname(remote_path))
+ name = os.path.basename(remote_path)
+ LOG.debug(_("ftp GET %(remote_path)s to: %(local_path)s") % locals())
+ with open(local_path, 'w') as ftpfile:
+ ftpcmd = 'RETR %s' % name
+ ftp.retrbinary(ftpcmd, ftpfile.write)
+ ftp.close()
+ except Exception:
+ LOG.error(_("File transfer from PowerVM manager failed"))
+ raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',
+ source_path=remote_path, dest_path=local_path)
+
+
+def aix_path_join(path_one, path_two):
+ """Ensures file path is built correctly for remote UNIX system
+
+ :param path_one: string of the first file path
+ :param path_two: string of the second file path
+ :returns: a uniform path constructed from both strings
+ """
+ if path_one.endswith('/'):
+ path_one = path_one.rstrip('/')
+
+ if path_two.startswith('/'):
+ path_two = path_two.lstrip('/')
+
+ final_path = path_one + '/' + path_two
+ return final_path
diff --git a/nova/virt/powervm/constants.py b/nova/virt/powervm/constants.py
index 1990ec5a5..0d1e0892e 100644
--- a/nova/virt/powervm/constants.py
+++ b/nova/virt/powervm/constants.py
@@ -18,17 +18,21 @@ from nova.compute import power_state
POWERVM_NOSTATE = ''
POWERVM_RUNNING = 'Running'
+POWERVM_STARTING = 'Starting'
POWERVM_SHUTDOWN = 'Not Activated'
POWERVM_POWER_STATE = {
POWERVM_NOSTATE: power_state.NOSTATE,
POWERVM_RUNNING: power_state.RUNNING,
POWERVM_SHUTDOWN: power_state.SHUTDOWN,
+ POWERVM_STARTING: power_state.RUNNING
}
POWERVM_CPU_INFO = ('ppc64', 'powervm', '3940')
POWERVM_HYPERVISOR_TYPE = 'powervm'
POWERVM_HYPERVISOR_VERSION = '7.1'
+POWERVM_MIN_ROOT_GB = 10
+
POWERVM_MIN_MEM = 512
POWERVM_MAX_MEM = 1024
POWERVM_MAX_CPUS = 1
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 7a0da0b88..ccba3cf73 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -14,12 +14,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import time
+
from nova.compute import task_states
from nova.compute import vm_states
-from nova import config
from nova import context as nova_context
-from nova import flags
+
+from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -51,7 +54,7 @@ powervm_opts = [
help='Local directory to download glance images to'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(powervm_opts)
@@ -86,7 +89,7 @@ class PowerVMDriver(driver.ComputeDriver):
return self._powervm.list_instances()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
return self._powervm.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
@@ -97,9 +100,10 @@ class PowerVMDriver(driver.ComputeDriver):
"""Create a new instance/VM/domain on powerVM."""
self._powervm.spawn(context, instance, image_meta['id'])
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance."""
- self._powervm.destroy(instance['name'])
+ self._powervm.destroy(instance['name'], destroy_disks)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
@@ -118,20 +122,62 @@ class PowerVMDriver(driver.ComputeDriver):
"""
pass
+ def snapshot(self, context, instance, image_id):
+ """Snapshots the specified instance.
+
+ :param context: security context
+ :param instance: Instance object as returned by DB layer.
+ :param image_id: Reference to a pre-created image that will
+ hold the snapshot.
+ """
+ snapshot_start = time.time()
+
+ # get current image info
+ glance_service, old_image_id = glance.get_remote_image_service(
+ context, instance['image_ref'])
+ image_meta = glance_service.show(context, old_image_id)
+ img_props = image_meta['properties']
+
+ # build updated snapshot metadata
+ snapshot_meta = glance_service.show(context, image_id)
+ new_snapshot_meta = {'is_public': False,
+ 'name': snapshot_meta['name'],
+ 'status': 'active',
+ 'properties': {'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id']
+ },
+ 'disk_format': image_meta['disk_format'],
+ 'container_format': image_meta['container_format']
+ }
+
+ if 'architecture' in image_meta['properties']:
+ arch = image_meta['properties']['architecture']
+ new_snapshot_meta['properties']['architecture'] = arch
+
+ # disk capture and glance upload
+ self._powervm.capture_image(context, instance, image_id,
+ new_snapshot_meta)
+
+ snapshot_time = time.time() - snapshot_start
+ inst_name = instance['name']
+ LOG.info(_("%(inst_name)s captured in %(snapshot_time)s seconds") %
+ locals())
+
def pause(self, instance):
"""Pause the specified instance."""
pass
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
pass
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
pass
- def resume(self, instance):
- """resume the specified instance"""
+ def resume(self, instance, network_info, block_device_info=None):
+ """resume the specified instance."""
pass
def power_off(self, instance):
@@ -139,10 +185,10 @@ class PowerVMDriver(driver.ComputeDriver):
self._powervm.power_off(instance['name'])
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._powervm.power_on(instance['name'])
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info."""
return self._powervm.get_available_resource()
diff --git a/nova/virt/powervm/exception.py b/nova/virt/powervm/exception.py
index 2a8cf4771..50e08eaea 100644
--- a/nova/virt/powervm/exception.py
+++ b/nova/virt/powervm/exception.py
@@ -22,7 +22,11 @@ class PowerVMConnectionFailed(exception.NovaException):
class PowerVMFileTransferFailed(exception.NovaException):
- message = _("File '%(file_path)' transfer to PowerVM manager failed")
+ message = _("File '%(file_path)s' transfer to PowerVM manager failed")
+
+
+class PowerVMFTPTransferFailed(PowerVMFileTransferFailed):
+ message = _("FTP %(ftp_cmd)s from %(source_path)s to %(dest_path)s failed")
class PowerVMLPARInstanceNotFound(exception.InstanceNotFound):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 09ad662b3..b25a96159 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -15,19 +15,18 @@
# under the License.
import decimal
-import hashlib
-import os
import re
import time
-from nova import config
from nova import exception as nova_exception
-from nova import flags
from nova import utils
from nova.compute import power_state
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova.virt import images
+
+from nova.virt.powervm import blockdev
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
@@ -36,7 +35,7 @@ from nova.virt.powervm import lpar as LPAR
LOG = logging.getLogger(__name__)
-CONF = config.CONF
+CONF = cfg.CONF
def get_powervm_operator():
@@ -46,6 +45,13 @@ def get_powervm_operator():
CONF.powervm_mgr_passwd))
+def get_powervm_disk_adapter():
+ return blockdev.PowerVMLocalVolumeAdapter(
+ common.Connection(CONF.powervm_mgr,
+ CONF.powervm_mgr_user,
+ CONF.powervm_mgr_passwd))
+
+
class PowerVMOperator(object):
"""PowerVM main operator.
@@ -55,6 +61,7 @@ class PowerVMOperator(object):
def __init__(self):
self._operator = get_powervm_operator()
+ self._disk_adapter = get_powervm_disk_adapter()
self._host_stats = {}
self._update_host_stats()
@@ -73,7 +80,8 @@ class PowerVMOperator(object):
"""
lpar_instance = self._get_instance(instance_name)
- state = constants.POWERVM_POWER_STATE[lpar_instance['state']]
+ state = constants.POWERVM_POWER_STATE.get(
+ lpar_instance['state'], power_state.NOSTATE)
return {'state': state,
'max_mem': lpar_instance['max_mem'],
'mem': lpar_instance['desired_mem'],
@@ -129,7 +137,7 @@ class PowerVMOperator(object):
return dic
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
if refresh:
self._update_host_stats()
return self._host_stats
@@ -217,29 +225,21 @@ class PowerVMOperator(object):
def _create_image(context, instance, image_id):
"""Fetch image from glance and copy it to the remote system."""
try:
- file_name = '.'.join([image_id, 'gz'])
- file_path = os.path.join(CONF.powervm_img_local_path,
- file_name)
- LOG.debug(_("Fetching image '%s' from glance") % image_id)
- images.fetch_to_raw(context, image_id, file_path,
- instance['user_id'],
- project_id=instance['project_id'])
- LOG.debug(_("Copying image '%s' to IVM") % file_path)
- remote_path = CONF.powervm_img_remote_path
- remote_file_name, size = self._operator.copy_image_file(
- file_path, remote_path)
- # Logical volume
- LOG.debug(_("Creating logical volume"))
+ root_volume = self._disk_adapter.create_volume_from_image(
+ context, instance, image_id)
+
+ self._disk_adapter.attach_volume_to_host(root_volume)
+
lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
- disk_name = self._operator.create_logical_volume(size)
- self._operator.attach_disk_to_vhost(disk_name, vhost)
- LOG.debug(_("Copying image to the device '%s'") % disk_name)
- self._operator.copy_file_to_device(remote_file_name, disk_name)
+ self._operator.attach_disk_to_vhost(
+ root_volume['device_name'], vhost)
except Exception, e:
LOG.exception(_("PowerVM image creation failed: %s") % str(e))
raise exception.PowerVMImageCreationFailed()
+ spawn_start = time.time()
+
try:
_create_lpar_instance(instance)
_create_image(context, instance, image_id)
@@ -264,20 +264,60 @@ class PowerVMOperator(object):
time.sleep(1)
except exception.PowerVMImageCreationFailed:
- self._cleanup(instance['name'])
+ with excutils.save_and_reraise_exception():
+ # log errors in cleanup
+ try:
+ self._cleanup(instance['name'])
+ except Exception:
+ LOG.exception(_('Error while attempting to '
+ 'clean up failed instance launch.'))
+
+ spawn_time = time.time() - spawn_start
+ LOG.info(_("Instance spawned in %s seconds") % spawn_time,
+ instance=instance)
- def destroy(self, instance_name):
+ def destroy(self, instance_name, destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
:param instance_name: Instance name.
"""
try:
- self._cleanup(instance_name)
+ self._cleanup(instance_name, destroy_disks)
except exception.PowerVMLPARInstanceNotFound:
LOG.warn(_("During destroy, LPAR instance '%s' was not found on "
"PowerVM system.") % instance_name)
- def _cleanup(self, instance_name):
+ def capture_image(self, context, instance, image_id, image_meta):
+ """Capture the root disk for a snapshot
+
+ :param context: nova context for this operation
+ :param instance: instance information to capture the image from
+ :param image_id: uuid of pre-created snapshot image
+ :param image_meta: metadata to upload with captured image
+ """
+ lpar = self._operator.get_lpar(instance['name'])
+ previous_state = lpar['state']
+
+ # stop the instance if it is running
+ if previous_state == 'Running':
+ LOG.debug(_("Stopping instance %s for snapshot.") %
+ instance['name'])
+ # wait up to 2 minutes for shutdown
+ self.power_off(instance['name'], timeout=120)
+
+ # get disk_name
+ vhost = self._operator.get_vhost_by_instance_id(lpar['lpar_id'])
+ disk_name = self._operator.get_disk_name_by_vhost(vhost)
+
+ # do capture and upload
+ self._disk_adapter.create_image_from_volume(
+ disk_name, context, image_id, image_meta)
+
+ # restart instance if it was running before
+ if previous_state == 'Running':
+ self.power_on(instance['name'])
+
+ def _cleanup(self, instance_name, destroy_disks=True):
lpar_id = self._get_instance(instance_name)['lpar_id']
try:
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
@@ -286,9 +326,11 @@ class PowerVMOperator(object):
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
- if disk_name:
- LOG.debug(_("Removing the logical volume '%s'") % disk_name)
- self._operator.remove_logical_volume(disk_name)
+ if disk_name and destroy_disks:
+ # TODO(mrodden): we should also detach from the instance
+ # before we start deleting things...
+ self._disk_adapter.detach_volume_from_host(disk_name)
+ self._disk_adapter.delete_volume(disk_name)
LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
self._operator.remove_lpar(instance_name)
@@ -431,20 +473,6 @@ class BaseOperator(object):
return None
- def get_disk_name_by_vhost(self, vhost):
- """Returns the disk name attached to a vhost.
-
- :param vhost: a vhost name
- :returns: string -- disk name
- """
- cmd = self.command.lsmap('-vadapter %s -field backing -fmt :'
- % vhost)
- output = self.run_command(cmd)
- if output:
- return output[0]
-
- return None
-
def get_hostname(self):
"""Returns the managed system hostname.
@@ -453,148 +481,18 @@ class BaseOperator(object):
output = self.run_command(self.command.hostname())
return output[0]
- def remove_disk(self, disk_name):
- """Removes a disk.
-
- :param disk: a disk name
- """
- self.run_command(self.command.rmdev('-dev %s' % disk_name))
-
- def create_logical_volume(self, size):
- """Creates a logical volume with a minimum size.
+ def get_disk_name_by_vhost(self, vhost):
+ """Returns the disk name attached to a vhost.
- :param size: size of the logical volume in bytes
- :returns: string -- the name of the new logical volume.
- :raises: PowerVMNoSpaceLeftOnVolumeGroup
+ :param vhost: a vhost name
+ :returns: string -- disk name
"""
- vgs = self.run_command(self.command.lsvg())
- cmd = self.command.lsvg('%s -field vgname freepps -fmt :'
- % ' '.join(vgs))
+ cmd = self.command.lsmap('-vadapter %s -field backing -fmt :' % vhost)
output = self.run_command(cmd)
- found_vg = None
-
- # If it's not a multiple of 1MB we get the next
- # multiple and use it as the megabyte_size.
- megabyte = 1024 * 1024
- if (size % megabyte) != 0:
- megabyte_size = int(size / megabyte) + 1
- else:
- megabyte_size = size / megabyte
-
- # Search for a volume group with enough free space for
- # the new logical volume.
- for vg in output:
- # Returned output example: 'rootvg:396 (25344 megabytes)'
- match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
- if match is None:
- continue
- vg_name, avail_size = match.groups()
- if megabyte_size <= int(avail_size):
- found_vg = vg_name
- break
-
- if not found_vg:
- LOG.error(_('Could not create logical volume. '
- 'No space left on any volume group.'))
- raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
-
- cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
- lv_name, = self.run_command(cmd)
- return lv_name
-
- def remove_logical_volume(self, lv_name):
- """Removes the lv and the connection between its associated vscsi.
-
- :param lv_name: a logical volume name
- """
- cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
- self.run_command(cmd)
-
- def copy_file_to_device(self, source_path, device):
- """Copy file to device.
-
- :param source_path: path to input source file
- :param device: output device name
- """
- cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
- self.run_command_as_root(cmd)
-
- def copy_image_file(self, source_path, remote_path):
- """Copy file to VIOS, decompress it, and return its new size and name.
+ if output:
+ return output[0]
- :param source_path: source file path
- :param remote_path remote file path
- """
- # Calculate source image checksum
- hasher = hashlib.md5()
- block_size = 0x10000
- img_file = file(source_path, 'r')
- buf = img_file.read(block_size)
- while len(buf) > 0:
- hasher.update(buf)
- buf = img_file.read(block_size)
- source_cksum = hasher.hexdigest()
-
- comp_path = remote_path + os.path.basename(source_path)
- uncomp_path = comp_path.rstrip(".gz")
- final_path = "%s.%s" % (uncomp_path, source_cksum)
-
- # Check whether the uncompressed image is already on IVM
- output = self.run_command("ls %s" % final_path, check_exit_code=False)
-
- # If the image does not exist already
- if not len(output):
- # Copy file to IVM
- common.ftp_put_command(self.connection_data, source_path,
- remote_path)
-
- # Verify image file checksums match
- cmd = ("/usr/bin/csum -h MD5 %s |"
- "/usr/bin/awk '{print $1}'" % comp_path)
- output = self.run_command_as_root(cmd)
- if not len(output):
- LOG.error(_("Unable to get checksum"))
- raise exception.PowerVMFileTransferFailed()
- if source_cksum != output[0]:
- LOG.error(_("Image checksums do not match"))
- raise exception.PowerVMFileTransferFailed()
-
- # Unzip the image
- cmd = "/usr/bin/gunzip %s" % comp_path
- output = self.run_command_as_root(cmd)
-
- # Remove existing image file
- cmd = "/usr/bin/rm -f %s.*" % uncomp_path
- output = self.run_command_as_root(cmd)
-
- # Rename unzipped image
- cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
- output = self.run_command_as_root(cmd)
-
- # Remove compressed image file
- cmd = "/usr/bin/rm -f %s" % comp_path
- output = self.run_command_as_root(cmd)
-
- # Calculate file size in multiples of 512 bytes
- output = self.run_command("ls -o %s|awk '{print $4}'"
- % final_path, check_exit_code=False)
- if len(output):
- size = int(output[0])
- else:
- LOG.error(_("Uncompressed image file not found"))
- raise exception.PowerVMFileTransferFailed()
- if (size % 512 != 0):
- size = (int(size / 512) + 1) * 512
-
- return final_path, size
-
- def run_cfg_dev(self, device_name):
- """Run cfgdev command for a specific device.
-
- :param device_name: device name the cfgdev command will run.
- """
- cmd = self.command.cfgdev('-dev %s' % device_name)
- self.run_command(cmd)
+ return None
def attach_disk_to_vhost(self, disk, vhost):
"""Attach disk name to a specific vhost.
diff --git a/nova/virt/storage_users.py b/nova/virt/storage_users.py
new file mode 100644
index 000000000..6555609a4
--- /dev/null
+++ b/nova/virt/storage_users.py
@@ -0,0 +1,63 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Michael Still and Canonical Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import json
+import os
+import time
+
+from nova.openstack.common import lockutils
+
+
+TWENTY_FOUR_HOURS = 3600 * 24
+
+
+@lockutils.synchronized('storage-registry-lock', 'nova-', external=True)
+def register_storage_use(storage_path, hostname):
+    """Identify the id of this instance storage."""
+
+ # NOTE(mikal): this is required to determine if the instance storage is
+ # shared, which is something that the image cache manager needs to
+ # know. I can imagine other uses as well though.
+
+ d = {}
+ id_path = os.path.join(storage_path, 'compute_nodes')
+ if os.path.exists(id_path):
+ with open(id_path) as f:
+ d = json.loads(f.read())
+
+ d[hostname] = time.time()
+
+ with open(id_path, 'w') as f:
+ f.write(json.dumps(d))
+
+
+@lockutils.synchronized('storage-registry-lock', 'nova-', external=True)
+def get_storage_users(storage_path):
+ """Get a list of all the users of this storage path."""
+
+ d = {}
+ id_path = os.path.join(storage_path, 'compute_nodes')
+ if os.path.exists(id_path):
+ with open(id_path) as f:
+ d = json.loads(f.read())
+
+ recent_users = []
+ for node in d:
+ if time.time() - d[node] < TWENTY_FOUR_HOURS:
+ recent_users.append(node)
+
+ return recent_users
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 13aaa7e4d..53cbabc30 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -15,9 +15,6 @@
# under the License.
-from nova import db
-
-
class VirtAPI(object):
def instance_update(self, context, instance_uuid, updates):
"""Perform an instance update operation on behalf of a virt driver
@@ -42,3 +39,61 @@ class VirtAPI(object):
:param host: host running instances to be returned
"""
raise NotImplementedError()
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ """Get a list of aggregates to which the specified host belongs
+ :param context: security context
+ :param host: the host for which aggregates should be returned
+ :param key: optionally filter by hosts with the given metadata key
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_add(self, context, aggregate, metadata,
+ set_delete=False):
+ """Add/update metadata for specified aggregate
+ :param context: security context
+ :param aggregate: aggregate on which to update metadata
+ :param metadata: dict of metadata to add/update
+ :param set_delete: if True, only add
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_delete(self, context, aggregate, key):
+ """Delete the given metadata key from specified aggregate
+ :param context: security context
+ :param aggregate: aggregate from which to delete metadata
+ :param key: metadata key to delete
+ """
+ raise NotImplementedError()
+
+ def security_group_get_by_instance(self, context, instance):
+ """Get the security group for a specified instance
+ :param context: security context
+ :param instance: instance defining the security group we want
+ """
+ raise NotImplementedError()
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group):
+ """Get the rules associated with a specified security group
+ :param context: security context
+ :param security_group: the security group for which the rules
+ should be returned
+ """
+ raise NotImplementedError()
+
+ def provider_fw_rule_get_all(self, context):
+ """Get the provider firewall rules
+ :param context: security context
+ """
+ raise NotImplementedError()
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ """Get information about the available agent builds for a given
+ hypervisor, os, and architecture
+ :param context: security context
+ :param hypervisor: agent hypervisor type
+ :param os: agent operating system type
+ :param architecture: agent architecture
+ """
+ raise NotImplementedError()
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index fa6f6ceb5..66e7d9b02 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -18,4 +18,4 @@
:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
-from nova.virt.vmwareapi.driver import VMWareESXDriver
+from nova.virt.vmwareapi.driver import VMwareESXDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 25cd05778..986c4ef28 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -36,9 +36,7 @@ import time
from eventlet import event
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -54,35 +52,32 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMWare ESX host.Required if '
- 'compute_driver is vmwareapi.VMWareESXDriver.'),
+ help='URL for connection to VMware ESX host.Required if '
+ 'compute_driver is vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
- help='Username for connection to VMWare ESX host. '
+ help='Username for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
- help='Password for connection to VMWare ESX host. '
+ help='Password for connection to VMware ESX host. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver.'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
- 'vmwareapi.VMWareESXDriver.'),
- cfg.StrOpt('vmwareapi_vlan_interface',
- default='vmnic0',
- help='Physical ethernet adapter name for vlan networking'),
+ 'vmwareapi.VMwareESXDriver.'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts)
TIME_BETWEEN_API_CALL_RETRIES = 2.0
@@ -98,11 +93,11 @@ class Failure(Exception):
return str(self.details)
-class VMWareESXDriver(driver.ComputeDriver):
+class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, virtapi, read_only=False, scheme="https"):
- super(VMWareESXDriver, self).__init__(virtapi)
+ super(VMwareESXDriver, self).__init__(virtapi)
host_ip = CONF.vmwareapi_host_ip
host_username = CONF.vmwareapi_host_username
@@ -112,17 +107,20 @@ class VMWareESXDriver(driver.ComputeDriver):
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
- "compute_driver=vmwareapi.VMWareESXDriver"))
+ "compute_driver=vmwareapi.VMwareESXDriver"))
- session = VMWareAPISession(host_ip, host_username, host_password,
+ session = VMwareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
- self._vmops = vmops.VMWareVMOps(session)
+ self._vmops = vmops.VMwareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
+ def legacy_nwinfo(self):
+ return True
+
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
@@ -132,18 +130,19 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, network_info)
- def snapshot(self, context, instance, name):
+ def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(context, instance, name)
+ self._vmops.snapshot(context, instance, name, update_task_state)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy VM instance."""
- self._vmops.destroy(instance, network_info)
+ self._vmops.destroy(instance, network_info, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
@@ -157,7 +156,7 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
- def resume(self, instance):
+ def resume(self, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
@@ -174,7 +173,7 @@ class VMWareESXDriver(driver.ComputeDriver):
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
- """Return volume connector information"""
+ """Return volume connector information."""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
@@ -183,11 +182,11 @@ class VMWareESXDriver(driver.ComputeDriver):
'host': None
}
- def attach_volume(self, connection_info, instance_name, mountpoint):
+ def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
pass
- def detach_volume(self, connection_info, instance_name, mountpoint):
+ def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
pass
@@ -197,23 +196,10 @@ class VMWareESXDriver(driver.ComputeDriver):
'username': CONF.vmwareapi_host_username,
'password': CONF.vmwareapi_host_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""This method is supported only by libvirt."""
return
- def host_power_action(self, host, action):
- """Reboots, shuts down or powers up the host."""
- raise NotImplementedError()
-
- def host_maintenance_mode(self, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation."""
- raise NotImplementedError()
-
- def set_host_enabled(self, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- raise NotImplementedError()
-
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
@@ -223,7 +209,7 @@ class VMWareESXDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance, network_info)
-class VMWareAPISession(object):
+class VMwareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
@@ -372,8 +358,8 @@ class VMWareAPISession(object):
The task is polled until it completes.
"""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
- done)
+ loop = utils.FixedIntervalLoopingCall(self._poll_task, instance_uuid,
+ task_ref, done)
loop.start(CONF.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index fdf85dc8b..3f5041c22 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -16,7 +16,7 @@
# under the License.
"""
-A fake VMWare VI API implementation.
+A fake VMware VI API implementation.
"""
import pprint
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_util.py
index a3b20137d..a3b20137d 100644
--- a/nova/virt/vmwareapi/network_utils.py
+++ b/nova/virt/vmwareapi/network_util.py
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index d8840938d..39ea8e2e8 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -27,7 +27,6 @@ import urllib
import urllib2
import urlparse
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -109,8 +108,8 @@ class VMwareHTTPFile(object):
raise NotImplementedError
-class VMWareHTTPWriteFile(VMwareHTTPFile):
- """VMWare file write handler class."""
+class VMwareHTTPWriteFile(VMwareHTTPFile):
+ """VMware file write handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, file_size, scheme="https"):
@@ -141,12 +140,12 @@ class VMWareHTTPWriteFile(VMwareHTTPFile):
self.conn.getresponse()
except Exception, excep:
LOG.debug(_("Exception during HTTP connection close in "
- "VMWareHTTpWrite. Exception is %s") % excep)
- super(VMWareHTTPWriteFile, self).close()
+ "VMwareHTTpWrite. Exception is %s") % excep)
+ super(VMwareHTTPWriteFile, self).close()
-class VmWareHTTPReadFile(VMwareHTTPFile):
- """VMWare file read handler class."""
+class VMwareHTTPReadFile(VMwareHTTPFile):
+ """VMware file read handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, scheme="https"):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index c98bde68c..c5b524186 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -15,76 +15,66 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""VIF drivers for VMWare."""
+"""VIF drivers for VMware."""
-from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt import vif
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
LOG = logging.getLogger(__name__)
-CONF = config.CONF
-CONF.set_default('vmwareapi_vlan_interface', 'vmnic0')
+CONF = cfg.CONF
+vmwareapi_vif_opts = [
+ cfg.StrOpt('vmwareapi_vlan_interface',
+ default='vmnic0',
+ help='Physical ethernet adapter name for vlan networking'),
+]
-class VMWareVlanBridgeDriver(vif.VIFDriver):
- """VIF Driver to setup bridge/VLAN networking using VMWare API."""
+CONF.register_opts(vmwareapi_vif_opts)
- def plug(self, instance, vif):
- """Plug the VIF to specified instance using information passed.
- Currently we are plugging the VIF(s) during instance creation itself.
- We can use this method when we add support to add additional NIC to
- an existing instance."""
- pass
- def ensure_vlan_bridge(self, session, network):
- """Create a vlan and bridge unless they already exist."""
- vlan_num = network['vlan']
- bridge = network['bridge']
- vlan_interface = CONF.vmwareapi_vlan_interface
+def ensure_vlan_bridge(self, session, network):
+ """Create a vlan and bridge unless they already exist."""
+ vlan_num = network['vlan']
+ bridge = network['bridge']
+ vlan_interface = CONF.vmwareapi_vlan_interface
- # Check if the vlan_interface physical network adapter exists on the
- # host.
- if not network_utils.check_if_vlan_interface_exists(session,
- vlan_interface):
- raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
-
- # Get the vSwitch associated with the Physical Adapter
- vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
- session, vlan_interface)
- if vswitch_associated is None:
- raise exception.SwitchNotFoundForNetworkAdapter(
- adapter=vlan_interface)
- # Check whether bridge already exists and retrieve the the ref of the
- # network whose name_label is "bridge"
- network_ref = network_utils.get_network_with_the_name(session, bridge)
- if network_ref is None:
- # Create a port group on the vSwitch associated with the
- # vlan_interface corresponding physical network adapter on the ESX
- # host.
- network_utils.create_port_group(session, bridge,
- vswitch_associated, vlan_num)
- else:
- # Get the vlan id and vswitch corresponding to the port group
- _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup
- pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
+ # Check if the vlan_interface physical network adapter exists on the
+ # host.
+ if not network_util.check_if_vlan_interface_exists(session,
+ vlan_interface):
+ raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
- # Check if the vswitch associated is proper
- if pg_vswitch != vswitch_associated:
- raise exception.InvalidVLANPortGroup(
- bridge=bridge, expected=vswitch_associated,
- actual=pg_vswitch)
+ # Get the vSwitch associated with the Physical Adapter
+ vswitch_associated = network_util.get_vswitch_for_vlan_interface(
+ session, vlan_interface)
+ if vswitch_associated is None:
+ raise exception.SwitchNotFoundForNetworkAdapter(
+ adapter=vlan_interface)
+ # Check whether bridge already exists and retrieve the the ref of the
+ # network whose name_label is "bridge"
+ network_ref = network_util.get_network_with_the_name(session, bridge)
+ if network_ref is None:
+ # Create a port group on the vSwitch associated with the
+ # vlan_interface corresponding physical network adapter on the ESX
+ # host.
+ network_util.create_port_group(session, bridge,
+ vswitch_associated, vlan_num)
+ else:
+ # Get the vlan id and vswitch corresponding to the port group
+ _get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
+ pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
- # Check if the vlan id is proper for the port group
- if pg_vlanid != vlan_num:
- raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
- pgroup=pg_vlanid)
+ # Check if the vswitch associated is proper
+ if pg_vswitch != vswitch_associated:
+ raise exception.InvalidVLANPortGroup(
+ bridge=bridge, expected=vswitch_associated,
+ actual=pg_vswitch)
- def unplug(self, instance, vif):
- """Cleanup operations like deleting port group if no instance
- is associated with it."""
- pass
+ # Check if the vlan id is proper for the port group
+ if pg_vlanid != vlan_num:
+ raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
+ pgroup=pg_vlanid)
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index c00617f4b..83d120df5 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -26,8 +27,6 @@ try:
except ImportError:
suds = None
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.virt.vmwareapi import error_util
@@ -42,7 +41,7 @@ vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
'Due to a bug in vSphere ESX 4.1 default wsdl. '
'Refer readme-vmware to setup')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(vmwareapi_wsdl_loc_opt)
@@ -103,69 +102,65 @@ class Vim:
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
- try:
- return object.__getattr__(self, attr_name)
- except AttributeError:
-
- def vim_request_handler(managed_object, **kwargs):
- """
- Builds the SOAP message and parses the response for fault
- checking and other errors.
-
- managed_object : Managed Object Reference or Managed
- Object Name
- **kwargs : Keyword arguments of the call
- """
- # Dynamic handler for VI SDK Calls
- try:
- request_mo = self._request_managed_object_builder(
- managed_object)
- request = getattr(self.client.service, attr_name)
- response = request(request_mo, **kwargs)
- # To check for the faults that are part of the message body
- # and not returned as Fault object response from the ESX
- # SOAP server
- if hasattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker"):
- fault_checker = getattr(error_util.FaultCheckers,
- attr_name.lower() + "_fault_checker")
- fault_checker(response)
- return response
- # Catch the VimFaultException that is raised by the fault
- # check of the SOAP response
- except error_util.VimFaultException, excep:
- raise
- except suds.WebFault, excep:
- doc = excep.document
- detail = doc.childAtPath("/Envelope/Body/Fault/detail")
- fault_list = []
- for child in detail.getChildren():
- fault_list.append(child.get("type"))
- raise error_util.VimFaultException(fault_list, excep)
- except AttributeError, excep:
- raise error_util.VimAttributeError(_("No such SOAP method "
- "'%s' provided by VI SDK") % (attr_name), excep)
- except (httplib.CannotSendRequest,
- httplib.ResponseNotReady,
- httplib.CannotSendHeader), excep:
- raise error_util.SessionOverLoadException(_("httplib "
- "error in %s: ") % (attr_name), excep)
- except Exception, excep:
- # Socket errors which need special handling for they
- # might be caused by ESX API call overload
- if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
- str(excep).find(CONN_ABORT_ERROR)) != -1:
- raise error_util.SessionOverLoadException(_("Socket "
- "error in %s: ") % (attr_name), excep)
- # Type error that needs special handling for it might be
- # caused by ESX host API call overload
- elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
- raise error_util.SessionOverLoadException(_("Type "
- "error in %s: ") % (attr_name), excep)
- else:
- raise error_util.VimException(
- _("Exception in %s ") % (attr_name), excep)
- return vim_request_handler
+ def vim_request_handler(managed_object, **kwargs):
+ """
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ managed_object : Managed Object Reference or Managed
+ Object Name
+ **kwargs : Keyword arguments of the call
+ """
+ # Dynamic handler for VI SDK Calls
+ try:
+ request_mo = self._request_managed_object_builder(
+ managed_object)
+ request = getattr(self.client.service, attr_name)
+ response = request(request_mo, **kwargs)
+ # To check for the faults that are part of the message body
+ # and not returned as Fault object response from the ESX
+ # SOAP server
+ if hasattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker"):
+ fault_checker = getattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker")
+ fault_checker(response)
+ return response
+ # Catch the VimFaultException that is raised by the fault
+ # check of the SOAP response
+ except error_util.VimFaultException, excep:
+ raise
+ except suds.WebFault, excep:
+ doc = excep.document
+ detail = doc.childAtPath("/Envelope/Body/Fault/detail")
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get("type"))
+ raise error_util.VimFaultException(fault_list, excep)
+ except AttributeError, excep:
+ raise error_util.VimAttributeError(_("No such SOAP method "
+ "'%s' provided by VI SDK") % (attr_name), excep)
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader), excep:
+ raise error_util.SessionOverLoadException(_("httplib "
+ "error in %s: ") % (attr_name), excep)
+ except Exception, excep:
+ # Socket errors which need special handling for they
+ # might be caused by ESX API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR)) != -1:
+ raise error_util.SessionOverLoadException(_("Socket "
+ "error in %s: ") % (attr_name), excep)
+ # Type error that needs special handling for it might be
+ # caused by ESX host API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type "
+ "error in %s: ") % (attr_name), excep)
+ else:
+ raise error_util.VimException(
+ _("Exception in %s ") % (attr_name), excep)
+ return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 740355679..e03b88804 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -26,7 +26,7 @@ def build_datastore_path(datastore_name, path):
def split_datastore_path(datastore_path):
"""
- Split the VMWare style datastore path to get the Datastore
+ Split the VMware style datastore path to get the Datastore
name and the entity path.
"""
spl = datastore_path.split('[', 1)[1].split(']', 1)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index ab1eda134..883e751a8 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,24 +27,19 @@ import urllib2
import uuid
from nova.compute import power_state
-from nova import config
+from nova.compute import task_states
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import network_utils
+from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmware_images
-vmware_vif_driver_opt = cfg.StrOpt('vmware_vif_driver',
- default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
- help='The VMWare VIF driver to configure the VIFs.')
-
-CONF = config.CONF
-CONF.register_opt(vmware_vif_driver_opt)
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -54,13 +49,12 @@ VMWARE_POWER_STATES = {
'suspended': power_state.PAUSED}
-class VMWareVMOps(object):
+class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session):
"""Initializer."""
self._session = session
- self._vif_driver = importutils.import_object(CONF.vmware_vif_driver)
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@@ -163,7 +157,7 @@ class VMWareVMOps(object):
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
def _check_if_network_bridge_exists(network_name):
- network_ref = network_utils.get_network_with_the_name(
+ network_ref = network_util.get_network_with_the_name(
self._session, network_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
@@ -175,8 +169,8 @@ class VMWareVMOps(object):
mac_address = mapping['mac']
network_name = network['bridge']
if mapping.get('should_create_vlan'):
- network_ref = self._vif_driver.ensure_vlan_bridge(
- self._session, network)
+ network_ref = vmwarevif.ensure_vlan_bridge(
+ self._session, network)
else:
network_ref = _check_if_network_bridge_exists(network_name)
vif_infos.append({'network_name': network_name,
@@ -340,7 +334,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance"), instance=instance)
_power_on_vm()
- def snapshot(self, context, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
@@ -397,6 +391,7 @@ class VMWareVMOps(object):
instance=instance)
_create_vm_snapshot()
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
@@ -475,6 +470,8 @@ class VMWareVMOps(object):
LOG.debug(_("Uploaded image %s") % snapshot_name,
instance=instance)
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
@@ -542,7 +539,7 @@ class VMWareVMOps(object):
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
- def destroy(self, instance, network_info):
+ def destroy(self, instance, network_info, destroy_disks=True):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -593,30 +590,32 @@ class VMWareVMOps(object):
# Delete the folder holding the VM related content on
# the datastore.
- try:
- dir_ds_compliant_path = vm_util.build_datastore_path(
- datastore_name,
- os.path.dirname(vmx_file_path))
- LOG.debug(_("Deleting contents of the VM from "
- "datastore %(datastore_name)s") %
- {'datastore_name': datastore_name},
- instance=instance)
- delete_task = self._session._call_method(
- self._session._get_vim(),
- "DeleteDatastoreFile_Task",
- self._session._get_vim().get_service_content().fileManager,
- name=dir_ds_compliant_path)
- self._session._wait_for_task(instance['uuid'], delete_task)
- LOG.debug(_("Deleted contents of the VM from "
- "datastore %(datastore_name)s") %
- {'datastore_name': datastore_name},
- instance=instance)
- except Exception, excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, "
- "got this exception while deleting"
- " the VM contents from the disk: %s")
- % str(excep),
- instance=instance)
+ if destroy_disks:
+ try:
+ dir_ds_compliant_path = vm_util.build_datastore_path(
+ datastore_name,
+ os.path.dirname(vmx_file_path))
+ LOG.debug(_("Deleting contents of the VM from "
+ "datastore %(datastore_name)s") %
+ {'datastore_name': datastore_name},
+ instance=instance)
+ vim = self._session._get_vim()
+ delete_task = self._session._call_method(
+ vim,
+ "DeleteDatastoreFile_Task",
+ vim.get_service_content().fileManager,
+ name=dir_ds_compliant_path)
+ self._session._wait_for_task(instance['uuid'], delete_task)
+ LOG.debug(_("Deleted contents of the VM from "
+ "datastore %(datastore_name)s") %
+ {'datastore_name': datastore_name},
+ instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, "
+ "got this exception while deleting"
+ " the VM contents from the disk: %s")
+ % str(excep),
+ instance=instance)
except Exception, exc:
LOG.exception(exc, instance=instance)
@@ -825,10 +824,8 @@ class VMWareVMOps(object):
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
- for (network, mapping) in network_info:
- self._vif_driver.plug(instance, (network, mapping))
+ pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
- for (network, mapping) in network_info:
- self._vif_driver.unplug(instance, (network, mapping))
+ pass
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 15237fd5b..7c4480ea0 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -50,11 +50,11 @@ def start_transfer(context, read_file_handle, data_size,
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
- # In case of Glance - VMWare transfer, we just need a handle to the
- # HTTP Connection that is to send transfer data to the VMWare datastore.
+ # In case of Glance - VMware transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
- # In case of VMWare - Glance transfer, we relinquish VMWare HTTP file read
+ # In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glnace changing to active.
# The GlanceWriteThread handles the same for us.
@@ -96,7 +96,7 @@ def fetch_image(context, image, instance, **kwargs):
f = StringIO.StringIO()
image_service.download(context, image_id, f)
read_file_handle = read_write_util.GlanceFileRead(f)
- write_file_handle = read_write_util.VMWareHTTPWriteFile(
+ write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
@@ -113,7 +113,7 @@ def upload_image(context, image, instance, **kwargs):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug(_("Uploading image %s to the Glance image server") % image,
instance=instance)
- read_file_handle = read_write_util.VmWareHTTPReadFile(
+ read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 9fad07898..61cfa9631 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,8 +21,6 @@ import os
import time
import uuid
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -50,14 +48,14 @@ xenapi_agent_opts = [
'configuration is not injected into the image. '
'Used if compute_driver=xenapi.XenAPIDriver and '
' flat_injected=True'),
- cfg.StrOpt('xenapi_disable_agent',
+ cfg.BoolOpt('xenapi_disable_agent',
default=False,
help='Disable XenAPI agent. Reduces the amount of time '
'it takes nova to detect that a VM has started, when '
'that VM does not have the agent installed'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xenapi_agent_opts)
@@ -309,7 +307,7 @@ class SimpleDH(object):
@staticmethod
def mod_exp(num, exp, mod):
- """Efficient implementation of (num ** exp) % mod"""
+ """Efficient implementation of (num ** exp) % mod."""
result = 1
while exp > 0:
if (exp & 1) == 1:
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 55b67a931..0acc360e8 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -39,18 +39,14 @@ A driver for XenServer or Xen Cloud Platform.
import contextlib
import cPickle as pickle
-import time
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
-from nova import config
from nova import context
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
@@ -119,12 +115,13 @@ xenapi_opts = [
help='Timeout in seconds for XenAPI login.'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
+CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
- """A connection to XenServer or Xen Cloud Platform"""
+ """A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
@@ -138,14 +135,14 @@ class XenAPIDriver(driver.ComputeDriver):
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
- self._session = XenAPISession(url, username, password)
+ self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
- self._pool = pool.ResourcePool(self._session)
+ self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
@@ -163,55 +160,45 @@ class XenAPIDriver(driver.ComputeDriver):
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
- """List VM instances"""
+ """List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- """Create VM instance"""
+ """Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
- self._vmops.finish_revert_migration(instance)
- self._attach_mapped_block_devices(instance, block_device_info)
+ self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
- network_info, image_meta, resize_instance)
- self._attach_mapped_block_devices(instance, block_device_info)
+ network_info, image_meta, resize_instance,
+ block_device_info)
- def _attach_mapped_block_devices(self, instance, block_device_info):
- block_device_mapping = driver.block_device_info_get_mapping(
- block_device_info)
- for vol in block_device_mapping:
- connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
- self.attach_volume(connection_info,
- instance['name'], mount_device)
-
- def snapshot(self, context, instance, image_id):
- """ Create snapshot from a running VM instance """
- self._vmops.snapshot(context, instance, image_id)
+ def snapshot(self, context, instance, image_id, update_task_state):
+ """Create snapshot from a running VM instance."""
+ self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
- """Reboot VM instance"""
+ """Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance"""
+ """Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
@@ -224,16 +211,18 @@ class XenAPIDriver(driver.ComputeDriver):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
- def destroy(self, instance, network_info, block_device_info=None):
- """Destroy VM instance"""
- self._vmops.destroy(instance, network_info, block_device_info)
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
+ """Destroy VM instance."""
+ self._vmops.destroy(instance, network_info, block_device_info,
+ destroy_disks)
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -255,53 +244,49 @@ class XenAPIDriver(driver.ComputeDriver):
return rv
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
self._vmops.suspend(instance)
- def resume(self, instance):
- """resume the specified instance"""
+ def resume(self, instance, network_info, block_device_info=None):
+ """resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
- """Soft delete the specified instance"""
+ """Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
- """Poll for rebooting instances"""
+ """Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
- def poll_rescued_instances(self, timeout):
- """Poll for rescued instances"""
- self._vmops.poll_rescued_instances(timeout)
-
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
@@ -313,11 +298,11 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
- """Return data about VM instance"""
+ """Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
@@ -326,7 +311,7 @@ class XenAPIDriver(driver.ComputeDriver):
# we only care about VMs that correspond to a nova-managed
# instance:
- imap = dict([(inst.name, inst.uuid) for inst in instances])
+ imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
@@ -343,15 +328,15 @@ class XenAPIDriver(driver.ComputeDriver):
return bwcounters
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
- """Return link to instance's VNC console"""
+ """Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
- """Return volume connector information"""
+ """Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
@@ -372,16 +357,16 @@ class XenAPIDriver(driver.ComputeDriver):
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
- def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach volume storage to VM instance"""
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
- instance_name,
+ instance['name'],
mountpoint)
- def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
- instance_name,
+ instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
@@ -390,12 +375,13 @@ class XenAPIDriver(driver.ComputeDriver):
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
+ :param nodename: ignored in this driver
:returns: dictionary describing resources
"""
@@ -502,7 +488,7 @@ class XenAPIDriver(driver.ComputeDriver):
recover_method, block_migration, migrate_data)
def pre_live_migration(self, context, instance_ref, block_device_info,
- network_info):
+ network_info, migrate_data=None):
"""Preparation live migration.
:params block_device_info:
@@ -531,24 +517,24 @@ class XenAPIDriver(driver.ComputeDriver):
return self._vmops.unfilter_instance(instance_ref, network_info)
def refresh_security_group_rules(self, security_group_id):
- """ Updates security group rules for all instances
- associated with a given security group
- Invoked when security group rules are updated
- """
+ """Updates security group rules for all instances associated with a
+ given security group.
+
+ Invoked when security group rules are updated."""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """ Updates security group rules for all instances
- associated with a given security group
- Invoked when instances are added/removed to a security group
- """
+ """Updates security group rules for all instances associated with a
+ given security group.
+
+ Invoked when instances are added/removed to a security group."""
return self._vmops.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """ Updates security group rules for specified instance
- Invoked when instances are added/removed to a security group
- or when a rule is added/removed to a security group
- """
+ """Updates security group rules for specified instance.
+
+ Invoked when instances are added/removed to a security group
+ or when a rule is added/removed to a security group."""
return self._vmops.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -594,11 +580,11 @@ class XenAPIDriver(driver.ComputeDriver):
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
- def undo_aggregate_operation(self, context, op, aggregate_id,
+ def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
- aggregate_id, host, set_error)
+ aggregate, host, set_error)
def legacy_nwinfo(self):
"""
@@ -609,14 +595,22 @@ class XenAPIDriver(driver.ComputeDriver):
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
self._vmops.power_on(instance)
+ def get_per_instance_usage(self):
+ """Get information about instance resource usage.
+
+ :returns: dict of nova uuid => dict of usage
+ info
+ """
+ return self._vmops.get_per_instance_usage()
+
class XenAPISession(object):
- """The session to invoke XenAPI SDK calls"""
+ """The session to invoke XenAPI SDK calls."""
- def __init__(self, url, user, pw):
+ def __init__(self, url, user, pw, virtapi):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
@@ -628,6 +622,7 @@ class XenAPISession(object):
self.host_uuid = self._get_host_uuid()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
+ self._virtapi = virtapi
def _create_first_session(self, url, user, pw, exception):
try:
@@ -656,8 +651,9 @@ class XenAPISession(object):
def _get_host_uuid(self):
if self.is_slave:
- aggr = db.aggregate_get_by_host(context.get_admin_context(),
- CONF.host, key=pool_states.POOL_FLAG)[0]
+ aggr = self._virtapi.aggregate_get_by_host(
+ context.get_admin_context(),
+ CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
@@ -695,7 +691,7 @@ class XenAPISession(object):
@contextlib.contextmanager
def _get_session(self):
- """Return exclusive session for scope of with statement"""
+ """Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
@@ -739,7 +735,7 @@ class XenAPISession(object):
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
- """Parse exception details"""
+ """Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index db4f5d03e..666e46754 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -635,6 +635,14 @@ class SessionBase(object):
db_ref['power_state'] = 'Halted'
VM_clean_shutdown = VM_hard_shutdown
+ def VM_suspend(self, session, vm_ref):
+ db_ref = _db_content['VM'][vm_ref]
+ db_ref['power_state'] = 'Suspended'
+
+ def VM_pause(self, session, vm_ref):
+ db_ref = _db_content['VM'][vm_ref]
+ db_ref['power_state'] = 'Paused'
+
def pool_eject(self, session, host_ref):
pass
@@ -719,6 +727,8 @@ class SessionBase(object):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
+ elif name == 'XenAPI':
+ return FakeXenAPI()
else:
return None
@@ -882,6 +892,11 @@ class SessionBase(object):
return result
+class FakeXenAPI(object):
+ def __init__(self):
+ self.Failure = Failure
+
+
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index cd837e834..9c6a60d18 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -18,8 +18,6 @@
# under the License.
from nova import context
-from nova.db import api as db
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt import firewall
@@ -29,12 +27,11 @@ LOG = logging.getLogger(__name__)
class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
- """ Dom0IptablesFirewallDriver class
+ """Dom0IptablesFirewallDriver class
This class provides an implementation for nova.virt.Firewall
using iptables. This class is meant to be used with the xenapi
- backend and uses xenapi plugin to enforce iptables rules in dom0
-
+ backend and uses xenapi plugin to enforce iptables rules in dom0.
"""
def _plugin_execute(self, *cmd, **kwargs):
# Prepare arguments for plugin call
@@ -45,9 +42,9 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
json_ret = jsonutils.loads(ret)
return (json_ret['out'], json_ret['err'])
- def __init__(self, xenapi_session=None, **kwargs):
+ def __init__(self, virtapi, xenapi_session=None, **kwargs):
from nova.network import linux_net
- super(Dom0IptablesFirewallDriver, self).__init__(**kwargs)
+ super(Dom0IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
self._session = xenapi_session
# Create IpTablesManager with executor through plugin
self.iptables = linux_net.IptablesManager(self._plugin_execute)
@@ -57,15 +54,14 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def _build_tcp_udp_rule(self, rule, version):
- if rule.from_port == rule.to_port:
- return ['--dport', '%s' % (rule.from_port,)]
+ if rule['from_port'] == rule['to_port']:
+ return ['--dport', '%s' % (rule['from_port'],)]
else:
# No multiport needed for XS!
- return ['--dport', '%s:%s' % (rule.from_port,
- rule.to_port)]
+ return ['--dport', '%s:%s' % (rule['from_port'],
+ rule['to_port'])]
- @staticmethod
- def _provider_rules():
+ def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6.
Note: We could not use the common code from virt.firewall because
XS doesn't accept the '-m multiport' option"""
@@ -73,7 +69,7 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
- rules = db.provider_fw_rule_get_all(ctxt)
+ rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index f109f33f4..045409e4f 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -24,9 +24,7 @@ import logging
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
-from nova import db
from nova import exception
-from nova import notifications
from nova.openstack.common import jsonutils
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
@@ -77,34 +75,36 @@ class Host(object):
instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
- dest = _host_find(ctxt, self._session, host, host_ref)
- (old_ref, new_ref) = self._virtapi.instance_update(
- ctxt,
- instance['uuid'],
- {'host': dest,
- 'task_state': task_states.MIGRATING})
- notifications.send_update(ctxt, old_ref, new_ref)
+ aggregate = self._virtapi.aggregate_get_by_host(
+ ctxt, host, key=pool_states.POOL_FLAG)
+ if not aggregate:
+                msg = _('Aggregate for host %(host)s could not be'
+                        ' found.') % dict(host=host)
+ raise exception.NotFound(msg)
+
+ dest = _host_find(ctxt, self._session, aggregate[0],
+ host_ref)
+ self._virtapi.instance_update(
+ ctxt, instance['uuid'],
+ {'host': dest,
+ 'task_state': task_states.MIGRATING})
self._session.call_xenapi('VM.pool_migrate',
vm_ref, host_ref, {})
migrations_counter = migrations_counter + 1
- (old_ref, new_ref) = self._virtapi.instance_update(
- ctxt,
- instance['uuid'],
- {'vm_state': vm_states.ACTIVE})
- notifications.send_update(ctxt, old_ref, new_ref)
+ self._virtapi.instance_update(
+ ctxt, instance['uuid'],
+ {'vm_state': vm_states.ACTIVE})
break
except self._session.XenAPI.Failure:
- LOG.exception('Unable to migrate VM %(vm_ref)s'
- 'from %(host)s' % locals())
- (old_ref, new_ref) = self._virtapi.instance_update(
- ctxt,
- instance['uuid'],
- {'host': host,
- 'vm_state': vm_states.ACTIVE})
- notifications.send_update(ctxt, old_ref, new_ref)
+ LOG.exception(_('Unable to migrate VM %(vm_ref)s'
+ 'from %(host)s') % locals())
+ self._virtapi.instance_update(
+ ctxt, instance['uuid'],
+ {'host': host,
+ 'vm_state': vm_states.ACTIVE})
if vm_counter == migrations_counter:
return 'on_maintenance'
@@ -222,10 +222,11 @@ def _uuid_find(virtapi, context, host, name_label):
return None
-def _host_find(context, session, src, dst):
+def _host_find(context, session, src_aggregate, dst):
"""Return the host from the xenapi host reference.
- :param src: the compute host being put in maintenance (source of VMs)
+ :param src_aggregate: the aggregate that the compute host being put in
+ maintenance (source of VMs) belongs to
:param dst: the hypervisor host reference (destination of VMs)
:return: the compute host that manages dst
@@ -233,15 +234,11 @@ def _host_find(context, session, src, dst):
# NOTE: this would be a lot simpler if nova-compute stored
# CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
- aggregate = db.aggregate_get_by_host(context, src,
- key=pool_states.POOL_FLAG)[0]
- if not aggregate:
- raise exception.AggregateHostNotFound(host=src)
uuid = session.call_xenapi('host.get_record', dst)['uuid']
- for compute_host, host_uuid in aggregate.metadetails.iteritems():
+ for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
- 'metadata': aggregate.metadetails})
+ 'metadata': src_aggregate.metadetails})
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index fc66099ca..1682f18d1 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -22,14 +22,10 @@ Management class for Pool-related functions (join, eject, etc).
import urlparse
from nova.compute import rpcapi as compute_rpcapi
-from nova import config
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
@@ -41,86 +37,82 @@ xenapi_pool_opts = [
help='To use for hosts with different CPUs'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xenapi_pool_opts)
+CONF.import_opt('host', 'nova.netconf')
class ResourcePool(object):
"""
Implements resource pool operations.
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
host_ref = session.get_xenapi_host()
host_rec = session.call_xenapi('host.get_record', host_ref)
self._host_name = host_rec['hostname']
self._host_addr = host_rec['address']
self._host_uuid = host_rec['uuid']
self._session = session
+ self._virtapi = virtapi
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
- def _is_hv_pool(self, context, aggregate_id):
- return pool_states.is_hv_pool(context, aggregate_id)
-
- def _get_metadata(self, context, aggregate_id):
- return db.aggregate_metadata_get(context, aggregate_id)
-
- def undo_aggregate_operation(self, context, op, aggregate_id,
+ def undo_aggregate_operation(self, context, op, aggregate,
host, set_error):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
- db.aggregate_metadata_add(context, aggregate_id, metadata)
- op(context, aggregate_id, host)
+ self._virtapi.aggregate_metadata_add(context, aggregate,
+ metadata)
+ op(context, aggregate, host)
except Exception:
+ aggregate_id = aggregate['id']
LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
'during operation on %(host)s') % locals())
def add_to_aggregate(self, context, aggregate, host, slave_info=None):
"""Add a compute host to an aggregate."""
- if not self._is_hv_pool(context, aggregate.id):
+ if not pool_states.is_hv_pool(aggregate['metadetails']):
return
invalid = {pool_states.CHANGING: 'setup in progress',
pool_states.DISMISSED: 'aggregate deleted',
pool_states.ERROR: 'aggregate in error'}
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
- in invalid.keys()):
+ if (aggregate['metadetails'][pool_states.KEY] in invalid.keys()):
raise exception.InvalidAggregateAction(
action='add host',
- aggregate_id=aggregate.id,
- reason=invalid[self._get_metadata(context,
- aggregate.id)
- [pool_states.KEY]])
+ aggregate_id=aggregate['id'],
+ reason=aggregate['metadetails'][pool_states.KEY])
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
- == pool_states.CREATED):
- db.aggregate_metadata_add(context, aggregate.id,
- {pool_states.KEY: pool_states.CHANGING})
- if len(aggregate.hosts) == 1:
+ if (aggregate['metadetails'][pool_states.KEY] == pool_states.CREATED):
+ self._virtapi.aggregate_metadata_add(context, aggregate,
+ {pool_states.KEY:
+ pool_states.CHANGING})
+ if len(aggregate['hosts']) == 1:
# this is the first host of the pool -> make it master
- self._init_pool(aggregate.id, aggregate.name)
+ self._init_pool(aggregate['id'], aggregate['name'])
# save metadata so that we can find the master again
metadata = {'master_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate,
+ metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
- master_compute = self._get_metadata(context,
- aggregate.id)['master_compute']
+ master_compute = aggregate['metadetails']['master_compute']
if master_compute == CONF.host and master_compute != host:
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
- self._join_slave(aggregate.id, host,
+ self._join_slave(aggregate['id'], host,
slave_info.get('compute_uuid'),
slave_info.get('url'), slave_info.get('user'),
slave_info.get('passwd'))
metadata = {host: slave_info.get('xenhost_uuid'), }
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate,
+ metadata)
elif master_compute and master_compute != host:
# send rpc cast to master, asking to add the following
# host with specified credentials.
@@ -132,53 +124,52 @@ class ResourcePool(object):
def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
"""Remove a compute host from an aggregate."""
slave_info = slave_info or dict()
- if not self._is_hv_pool(context, aggregate.id):
+ if not pool_states.is_hv_pool(aggregate['metadetails']):
return
invalid = {pool_states.CREATED: 'no hosts to remove',
pool_states.CHANGING: 'setup in progress',
pool_states.DISMISSED: 'aggregate deleted', }
- if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
- in invalid.keys()):
+ if aggregate['metadetails'][pool_states.KEY] in invalid.keys():
raise exception.InvalidAggregateAction(
action='remove host',
- aggregate_id=aggregate.id,
- reason=invalid[self._get_metadata(context,
- aggregate.id)[pool_states.KEY]])
+ aggregate_id=aggregate['id'],
+ reason=invalid[aggregate['metadetails'][pool_states.KEY]])
- master_compute = self._get_metadata(context,
- aggregate.id)['master_compute']
+ master_compute = aggregate['metadetails']['master_compute']
if master_compute == CONF.host and master_compute != host:
# this is the master -> instruct it to eject a host from the pool
- host_uuid = self._get_metadata(context, aggregate.id)[host]
- self._eject_slave(aggregate.id,
+ host_uuid = aggregate['metadetails'][host]
+ self._eject_slave(aggregate['id'],
slave_info.get('compute_uuid'), host_uuid)
- db.aggregate_metadata_delete(context, aggregate.id, host)
+ self._virtapi.aggregate_metadata_delete(context, aggregate,
+ host)
elif master_compute == host:
# Remove master from its own pool -> destroy pool only if the
# master is on its own, otherwise raise fault. Destroying a
# pool made only by master is fictional
- if len(aggregate.hosts) > 1:
+ if len(aggregate['hosts']) > 1:
# NOTE: this could be avoided by doing a master
# re-election, but this is simpler for now.
raise exception.InvalidAggregateAction(
- aggregate_id=aggregate.id,
+ aggregate_id=aggregate['id'],
action='remove_from_aggregate',
reason=_('Unable to eject %(host)s '
'from the pool; pool not empty')
% locals())
- self._clear_pool(aggregate.id)
+ self._clear_pool(aggregate['id'])
for key in ['master_compute', host]:
- db.aggregate_metadata_delete(context, aggregate.id, key)
+ self._virtapi.aggregate_metadata_delete(context,
+ aggregate, key)
elif master_compute and master_compute != host:
# A master exists -> forward pool-eject request to master
slave_info = self._create_slave_info()
self.compute_rpcapi.remove_aggregate_host(
- context, aggregate.id, host, master_compute, slave_info)
+ context, aggregate['id'], host, master_compute, slave_info)
else:
# this shouldn't have happened
- raise exception.AggregateError(aggregate_id=aggregate.id,
+ raise exception.AggregateError(aggregate_id=aggregate['id'],
action='remove_from_aggregate',
reason=_('Unable to eject %(host)s '
'from the pool; No master found')
@@ -245,7 +236,7 @@ class ResourcePool(object):
reason=str(e.details))
def _create_slave_info(self):
- """XenServer specific info needed to join the hypervisor pool"""
+ """XenServer specific info needed to join the hypervisor pool."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index 82a85ce14..5bf326117 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -36,7 +36,6 @@ an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
that is, in order to recover from it an pool must be deleted.
"""
-from nova import db
CREATED = 'created'
CHANGING = 'changing'
@@ -49,7 +48,6 @@ KEY = 'operational_state'
POOL_FLAG = 'hypervisor_pool'
-def is_hv_pool(context, aggregate_id):
- """Checks if aggregate is a hypervisor_pool"""
- metadata = db.aggregate_metadata_get(context, aggregate_id)
+def is_hv_pool(metadata):
+ """Checks if aggregate is a hypervisor_pool."""
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 13dc74ba9..35cdb201d 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -19,11 +19,8 @@
"""VIF drivers for XenAPI."""
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt import vif
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
@@ -32,12 +29,12 @@ xenapi_ovs_integration_bridge_opt = cfg.StrOpt('xenapi_ovs_integration_bridge',
default='xapi1',
help='Name of Integration Bridge used by Open vSwitch')
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opt(xenapi_ovs_integration_bridge_opt)
LOG = logging.getLogger(__name__)
-class XenVIFDriver(vif.VIFDriver):
+class XenVIFDriver(object):
def __init__(self, xenapi_session):
self._session = xenapi_session
@@ -73,7 +70,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
return vif_rec
def _ensure_vlan_bridge(self, network):
- """Ensure that a VLAN bridge exists"""
+ """Ensure that a VLAN bridge exists."""
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index d0f59f56f..debba4f02 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -34,19 +34,19 @@ from xml.parsers import expat
from eventlet import greenthread
+from nova.api.metadata import base as instance_metadata
from nova import block_device
-from nova.compute import instance_types
from nova.compute import power_state
-from nova import config
-from nova import db
+from nova.compute import task_states
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt import configdrive
from nova.virt.disk import api as disk
+from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import driver
from nova.virt.xenapi import agent
from nova.virt.xenapi import volume_utils
@@ -55,6 +55,12 @@ from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
+ cfg.StrOpt('cache_images',
+ default='all',
+ help='Cache glance images locally. `all` will cache all'
+ ' images, `some` will only cache images that have the'
+ ' image_property `cache_in_nova=True`, and `none` turns'
+ ' off caching entirely'),
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
@@ -117,8 +123,12 @@ xenapi_vm_utils_opts = [
' within a given dom0. (-1 = no limit)')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xenapi_vm_utils_opts)
+CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+CONF.import_opt('use_cow_images', 'nova.virt.driver')
+CONF.import_opt('glance_num_retries', 'nova.image.glance')
+CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -145,6 +155,7 @@ class ImageType(object):
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
+ | 6 - config drive
"""
KERNEL = 0
@@ -153,7 +164,9 @@ class ImageType(object):
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
- _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
+ DISK_CONFIGDRIVE = 6
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
+ DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
@@ -161,8 +174,9 @@ class ImageType(object):
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
+ DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
- DISK_ISO_STR)
+ DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
@@ -170,17 +184,25 @@ class ImageType(object):
@classmethod
def get_role(cls, image_type_id):
- " Get the role played by the image, based on its type "
+ """Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
- cls.DISK_ISO: 'iso'
+ cls.DISK_ISO: 'iso',
+ cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
+def _system_metadata_to_dict(system_metadata):
+ result = {}
+ for item in system_metadata:
+ result[item['key']] = item['value']
+ return result
+
+
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False):
"""Create a VM record. Returns new VM reference.
@@ -194,8 +216,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
3. Using hardware virtualization
"""
- inst_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(inst_type_id)
+ instance_type = instance['instance_type']
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
@@ -270,27 +291,47 @@ def destroy_vm(session, instance, vm_ref):
LOG.debug(_("VM destroyed"), instance=instance)
-def shutdown_vm(session, instance, vm_ref, hard=True):
- vm_rec = session.call_xenapi("VM.get_record", vm_ref)
- state = compile_info(vm_rec)['state']
- if state == power_state.SHUTDOWN:
+def clean_shutdown_vm(session, instance, vm_ref):
+ if _is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
- return
+ return False
- LOG.debug(_("Shutting down VM"), instance=instance)
+ LOG.debug(_("Shutting down VM (cleanly)"), instance=instance)
try:
- if hard:
- session.call_xenapi('VM.hard_shutdown', vm_ref)
- else:
- session.call_xenapi('VM.clean_shutdown', vm_ref)
+ session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
+ return False
+ return True
+
+
+def hard_shutdown_vm(session, instance, vm_ref):
+ if _is_vm_shutdown(session, vm_ref):
+ LOG.warn(_("VM already halted, skipping shutdown..."),
+ instance=instance)
+ return False
+
+ LOG.debug(_("Shutting down VM (hard)"), instance=instance)
+ try:
+ session.call_xenapi('VM.hard_shutdown', vm_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ return False
+ return True
+
+
+def _is_vm_shutdown(session, vm_ref):
+ vm_rec = session.call_xenapi("VM.get_record", vm_ref)
+ state = compile_info(vm_rec)['state']
+ if state == power_state.SHUTDOWN:
+ return True
+ return False
def ensure_free_mem(session, instance):
inst_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(inst_type_id)
+ instance_type = instance['instance_type']
mem = long(instance_type['memory_mb']) * 1024 * 1024
host = session.get_xenapi_host()
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
@@ -299,7 +340,7 @@ def ensure_free_mem(session, instance):
def find_vbd_by_number(session, vm_ref, number):
- """Get the VBD reference from the device number"""
+ """Get the VBD reference from the device number."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
@@ -314,7 +355,7 @@ def find_vbd_by_number(session, vm_ref, number):
def unplug_vbd(session, vbd_ref):
- """Unplug VBD from VM"""
+ """Unplug VBD from VM."""
# Call VBD.unplug on the given VBD, with a retry if we get
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
@@ -345,7 +386,7 @@ def unplug_vbd(session, vbd_ref):
def destroy_vbd(session, vbd_ref):
- """Destroy VBD from host database"""
+ """Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure, exc:
@@ -429,35 +470,29 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
def get_vdis_for_boot_from_vol(session, dev_params):
vdis = {}
- sr_uuid = dev_params['sr_uuid']
- sr_ref = volume_utils.find_sr_by_uuid(session,
- sr_uuid)
+ sr_uuid, label, sr_params = volume_utils.parse_sr_info(dev_params)
+ sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
# Try introducing SR if it is not present
if not sr_ref:
- if 'name_label' not in dev_params:
- label = 'tempSR-%s' % dev_params['volume_id']
- else:
- label = dev_params['name_label']
-
- if 'name_description' not in dev_params:
- desc = ''
- else:
- desc = dev_params.get('name_description')
- sr_params = {}
- for k in dev_params['introduce_sr_keys']:
- sr_params[k] = dev_params[k]
-
- sr_params['name_description'] = desc
- sr_ref = volume_utils.introduce_sr(session, sr_uuid, label,
- sr_params)
+ sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
if sr_ref is None:
raise exception.NovaException(_('SR not present and could not be '
'introduced'))
else:
- session.call_xenapi("SR.scan", sr_ref)
- return {'root': dict(uuid=dev_params['vdi_uuid'],
- file=None, osvol=True)}
+ if 'vdi_uuid' in dev_params:
+ session.call_xenapi("SR.scan", sr_ref)
+ vdis = {'root': dict(uuid=dev_params['vdi_uuid'],
+ file=None, osvol=True)}
+ else:
+ try:
+ vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ vdis = {'root': dict(uuid=vdi_rec['uuid'],
+ file=None, osvol=True)}
+ except volume_utils.StorageError, exc:
+ LOG.exception(exc)
+ volume_utils.forget_sr(session, sr_uuid)
return vdis
@@ -490,8 +525,7 @@ def get_vdis_for_instance(context, session, instance, name_label, image,
bdm_root_dev = block_device_info['block_device_mapping'][0]
dev_params = bdm_root_dev['connection_info']['data']
LOG.debug(dev_params)
- return get_vdis_for_boot_from_vol(session,
- dev_params)
+ return get_vdis_for_boot_from_vol(session, dev_params)
return _create_image(context, session, instance, name_label, image,
image_type)
@@ -565,7 +599,7 @@ def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
def get_vdi_for_vm_safely(session, vm_ref):
- """Retrieves the primary VDI for a VM"""
+ """Retrieves the primary VDI for a VM."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd in vbd_refs:
vbd_rec = session.call_xenapi("VBD.get_record", vbd)
@@ -578,7 +612,11 @@ def get_vdi_for_vm_safely(session, vm_ref):
@contextlib.contextmanager
-def snapshot_attached_here(session, instance, vm_ref, label):
+def snapshot_attached_here(session, instance, vm_ref, label, *args):
+ update_task_state = None
+ if len(args) == 1:
+ update_task_state = args[0]
+
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
@@ -590,6 +628,8 @@ def snapshot_attached_here(session, instance, vm_ref, label):
sr_ref = vm_vdi_rec["SR"]
snapshot_ref = session.call_xenapi("VDI.snapshot", vm_vdi_ref, {})
+ if update_task_state is not None:
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
snapshot_rec = session.call_xenapi("VDI.get_record", snapshot_ref)
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
@@ -835,6 +875,38 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
CONF.default_ephemeral_format)
+def generate_configdrive(session, instance, vm_ref, userdevice,
+ admin_password=None, files=None):
+ sr_ref = safe_find_sr(session)
+ vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
+ 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
+
+ try:
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ extra_md = {}
+ if admin_password:
+ extra_md['admin_pass'] = admin_password
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=files,
+ extra_md=extra_md)
+ with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
+ with utils.tempdir() as tmp_path:
+ tmp_file = os.path.join(tmp_path, 'configdrive')
+ cdb.make_drive(tmp_file)
+
+ dev_path = utils.make_dev_path(dev)
+ utils.execute('dd',
+ 'if=%s' % tmp_file,
+ 'of=%s' % dev_path,
+ run_as_root=True)
+
+ create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
+ read_only=True)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ destroy_vdi(session, vdi_ref)
+
+
def create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
@@ -922,9 +994,7 @@ def _create_image(context, session, instance, name_label, image_id,
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
- # FIXME(sirp): This should be eager loaded like instance metadata
- sys_meta = db.instance_system_metadata_get(context,
- instance['uuid'])
+ sys_meta = _system_metadata_to_dict(instance['system_metadata'])
try:
cache = utils.bool_from_str(sys_meta['image_cache_in_nova'])
except KeyError:
@@ -1017,9 +1087,7 @@ def _image_uses_bittorrent(context, instance):
if xenapi_torrent_images == 'all':
bittorrent = True
elif xenapi_torrent_images == 'some':
- # FIXME(sirp): This should be eager loaded like instance metadata
- sys_meta = db.instance_system_metadata_get(context,
- instance['uuid'])
+ sys_meta = _system_metadata_to_dict(instance['system_metadata'])
try:
bittorrent = utils.bool_from_str(sys_meta['image_bittorrent'])
except KeyError:
@@ -1113,8 +1181,7 @@ def _check_vdi_size(context, session, instance, vdi_uuid):
# FIXME(jk0): this was copied directly from compute.manager.py, let's
# refactor this to a common area
- instance_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(instance_type_id)
+ instance_type = instance['instance_type']
allowed_size_gb = instance_type['root_gb']
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
@@ -1227,6 +1294,9 @@ def determine_disk_image_type(image_meta):
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
+ if not image_meta:
+ return None
+
disk_format = image_meta['disk_format']
disk_format_map = {
@@ -1265,6 +1335,16 @@ def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
3. Glance (DISK): pv is assumed
4. Glance (DISK_ISO): no pv is assumed
+
+ 5. Boot From Volume - without image metadata (None): attempt to
+ use Pygrub to figure out if the volume stores a PV VM or a
+ HVM one. Log a warning, because there may be cases where the
+ volume is RAW (in which case using pygrub is fine) and cases
+ where the content of the volume is VHD, and pygrub might not
+ work as expected.
+ NOTE: if disk_image_type is not specified, instances launched
+ from remote volumes will have to include kernel and ramdisk
+ because external kernel and ramdisk will not be fetched.
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
@@ -1284,6 +1364,12 @@ def determine_is_pv(session, vdi_ref, disk_image_type, os_type):
elif disk_image_type == ImageType.DISK_ISO:
# 4. ISO
is_pv = False
+ elif not disk_image_type:
+ LOG.warning(_("Image format is None: trying to determine PV status "
+ "using pygrub; if instance with vdi %s does not boot "
+ "correctly, try with image metadata.") % vdi_ref)
+ with vdi_attached_here(session, vdi_ref, read_only=True) as dev:
+ is_pv = _is_vdi_pv(dev)
else:
msg = _("Unknown image format %(disk_image_type)s") % locals()
raise exception.NovaException(msg)
@@ -1305,7 +1391,7 @@ def list_vms(session):
def lookup_vm_vdis(session, vm_ref):
- """Look for the VDIs that are attached to the VM"""
+ """Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
@@ -1328,7 +1414,7 @@ def lookup_vm_vdis(session, vm_ref):
def lookup(session, name_label):
- """Look the instance up and return it if available"""
+ """Look the instance up and return it if available."""
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
@@ -1373,7 +1459,7 @@ def is_snapshot(session, vm):
def compile_info(record):
- """Fill record with VM status information"""
+ """Fill record with VM status information."""
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -1382,7 +1468,7 @@ def compile_info(record):
def compile_diagnostics(record):
- """Compile VM diagnostics data"""
+ """Compile VM diagnostics data."""
try:
keys = []
diags = {}
@@ -1437,14 +1523,14 @@ def compile_metrics(start_time, stop_time=None):
def _scan_sr(session, sr_ref=None):
- """Scans the SR specified by sr_ref"""
+ """Scans the SR specified by sr_ref."""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
session.call_xenapi('SR.scan', sr_ref)
def scan_default_sr(session):
- """Looks for the system default SR and triggers a re-scan"""
+ """Looks for the system default SR and triggers a re-scan."""
_scan_sr(session, _find_sr(session))
@@ -1459,7 +1545,7 @@ def safe_find_sr(session):
def _find_sr(session):
- """Return the storage repository to hold VM images"""
+ """Return the storage repository to hold VM images."""
host = session.get_xenapi_host()
try:
tokens = CONF.sr_matching_filter.split(':')
@@ -1503,7 +1589,7 @@ def _safe_find_iso_sr(session):
def _find_iso_sr(session):
- """Return the storage repository to hold ISO images"""
+ """Return the storage repository to hold ISO images."""
host = session.get_xenapi_host()
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
@@ -1541,7 +1627,7 @@ def _get_rrd_server():
def _get_rrd(server, vm_uuid):
- """Return the VM RRD XML as a string"""
+ """Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
@@ -1557,7 +1643,7 @@ def _get_rrd(server, vm_uuid):
def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string"""
+ """Return the RRD updates XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
@@ -1663,7 +1749,7 @@ def _get_all_vdis_in_sr(session, sr_ref):
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
- """Return opaqueRef for all the vdis which live on sr"""
+ """Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
@@ -1686,7 +1772,7 @@ def _get_vhd_parent_uuid(session, vdi_ref):
def _walk_vdi_chain(session, vdi_uuid):
- """Yield vdi_recs for each element in a VDI chain"""
+ """Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
@@ -1805,7 +1891,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
- """Wait for device node to appear"""
+ """Wait for device node to appear."""
for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
@@ -1817,7 +1903,7 @@ def _wait_for_device(dev):
def cleanup_attached_vdis(session):
- """Unplug any instance VDIs left after an unclean restart"""
+ """Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
@@ -1882,14 +1968,21 @@ def _get_this_vm_ref(session):
def _is_vdi_pv(dev):
LOG.debug(_("Running pygrub against %s"), dev)
dev_path = utils.make_dev_path(dev)
- output = os.popen('pygrub -qn %s' % dev_path)
- for line in output.readlines():
- #try to find kernel string
- m = re.search('(?<=kernel:)/.*(?:>)', line)
- if m and m.group(0).find('xen') != -1:
- LOG.debug(_("Found Xen kernel %s") % m.group(0))
- return True
- LOG.debug(_("No Xen kernel found. Booting HVM."))
+ try:
+ out, err = utils.execute('pygrub', '-qn', dev_path, run_as_root=True)
+ for line in out:
+ # try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Error while executing pygrub! Please, ensure the "
+ "binary is installed correctly, and available in your "
+ "PATH; on some Linux distros, pygrub may be installed "
+ "in /usr/lib/xen-X.Y/bin/pygrub. Attempting to boot "
+ "in HVM mode."))
return False
@@ -2060,7 +2153,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
def _mount_filesystem(dev_path, dir):
- """mounts the device specified by dev_path in dir"""
+ """mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
@@ -2071,7 +2164,7 @@ def _mount_filesystem(dev_path, dir):
def _mounted_processing(device, key, net, metadata):
- """Callback which runs with the image VDI attached"""
+ """Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
@@ -2081,11 +2174,14 @@ def _mounted_processing(device, key, net, metadata):
try:
# This try block ensures that the umount occurs
if not agent.find_guest_agent(tmpdir):
+ vfs = vfsimpl.VFSLocalFS(imgfile=None,
+ imgfmt=None,
+ imgdir=tmpdir)
LOG.info(_('Manipulating interface files directly'))
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
- disk.inject_data_into_fs(tmpdir,
+ disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
utils.execute('umount', dev_path, run_as_root=True)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 0cef398ed..4a8372cda 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -28,13 +28,11 @@ import netaddr
from nova.compute import api as compute
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
-from nova import config
from nova import context as nova_context
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
@@ -42,11 +40,14 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
+from nova.virt import configdrive
+from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
@@ -61,8 +62,9 @@ xenapi_vmops_opts = [
help='The XenAPI VIF driver using XenServer Network APIs.')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
@@ -76,10 +78,11 @@ DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
+DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
- """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
@@ -149,9 +152,10 @@ class VMOps(object):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
- self.poll_rescue_last_ran = None
+ self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
- default=DEFAULT_FIREWALL_DRIVER,
+ DEFAULT_FIREWALL_DRIVER,
+ self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
@@ -181,7 +185,20 @@ class VMOps(object):
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info)
- def finish_revert_migration(self, instance):
+ def _attach_mapped_block_devices(self, instance, block_device_info):
+ # We are attaching these volumes before start (no hotplugging)
+ # because some guests (windows) don't load PV drivers quickly
+ block_device_mapping = virt_driver.block_device_info_get_mapping(
+ block_device_info)
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mount_device = vol['mount_device'].rpartition("/")[2]
+ self._volumeops.attach_volume(connection_info,
+ instance['name'],
+ mount_device,
+ hotplug=False)
+
+ def finish_revert_migration(self, instance, block_device_info=None):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
@@ -192,6 +209,8 @@ class VMOps(object):
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
+ self._attach_mapped_block_devices(instance, block_device_info)
+
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
@@ -223,6 +242,9 @@ class VMOps(object):
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
+
+ self._attach_mapped_block_devices(instance, block_device_info)
+
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
@@ -230,7 +252,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None):
- """Power on a VM instance"""
+ """Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
self._session.call_xenapi('VM.start_on', vm_ref,
@@ -238,10 +260,10 @@ class VMOps(object):
False, False)
def _create_disks(self, context, instance, name_label, disk_image_type,
- block_device_info=None):
+ image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
- instance['image_ref'],
+ image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
@@ -269,9 +291,10 @@ class VMOps(object):
return vm_utils.determine_disk_image_type(image_meta)
@step
- def create_disks_step(undo_mgr, disk_image_type):
+ def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
- disk_image_type, block_device_info)
+ disk_image_type, image_meta,
+ block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
@@ -323,7 +346,8 @@ class VMOps(object):
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
- disk_image_type)
+ disk_image_type, admin_password,
+ injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
@@ -387,7 +411,7 @@ class VMOps(object):
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
- vdis = create_disks_step(undo_mgr, disk_image_type)
+ vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
@@ -416,7 +440,12 @@ class VMOps(object):
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
- self.inject_instance_metadata(instance, vm_ref)
+
+ # NOTE(mikal): file injection only happens if we are _not_ using a
+ # configdrive.
+ if not configdrive.required_by(instance):
+ self.inject_instance_metadata(instance, vm_ref)
+
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
@@ -470,7 +499,7 @@ class VMOps(object):
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
- disk_image_type):
+ disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance['instance_type']
@@ -516,6 +545,13 @@ class VMOps(object):
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
+ # Attach (optional) configdrive v2 disk
+ if configdrive.required_by(instance):
+ vm_utils.generate_configdrive(self._session, instance, vm_ref,
+ DEVICE_CONFIGDRIVE,
+ admin_password=admin_password,
+ files=files)
+
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
@@ -536,7 +572,7 @@ class VMOps(object):
greenthread.sleep(0.5)
if self.agent_enabled:
- agent_build = db.agent_build_get_by_triple(
+ agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
@@ -606,7 +642,7 @@ class VMOps(object):
vm,
"start")
- def snapshot(self, context, instance, image_id):
+ def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
@@ -634,7 +670,10 @@ class VMOps(object):
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
- self._session, instance, vm_ref, label) as vdi_uuids:
+ self._session, instance, vm_ref, label,
+ update_task_state) as vdi_uuids:
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
vm_utils.upload_image(
context, self._session, instance, vdi_uuids, image_id)
@@ -692,8 +731,10 @@ class VMOps(object):
instance=instance)
# 2. Power down the instance before resizing
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
+ if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
+ LOG.debug(_("Clean shutdown did not complete successfully, "
+ "trying hard shutdown."), instance=instance)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
@@ -740,8 +781,10 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
+ if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
+ LOG.debug(_("Clean shutdown did not complete successfully, "
+ "trying hard shutdown."), instance=instance)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
@@ -766,8 +809,10 @@ class VMOps(object):
"""
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
- resize_down = (instance['auto_disk_config'] and
- instance['root_gb'] > instance_type['root_gb'])
+ resize_down = instance['root_gb'] > instance_type['root_gb']
+ if resize_down and not instance['auto_disk_config']:
+ reason = _('Resize down not allowed without auto_disk_config')
+ raise exception.ResizeError(reason=reason)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
@@ -1037,7 +1082,8 @@ class VMOps(object):
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
- def destroy(self, instance, network_info, block_device_info=None):
+ def destroy(self, instance, network_info, block_device_info=None,
+ destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
@@ -1057,10 +1103,11 @@ class VMOps(object):
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
return self._destroy(instance, vm_ref, network_info,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
- block_device_info=None):
+ block_device_info=None, destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
@@ -1074,12 +1121,13 @@ class VMOps(object):
instance=instance)
return
- vm_utils.shutdown_vm(self._session, instance, vm_ref)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
- # Destroy VDIs
- self._detach_vm_vols(instance, vm_ref, block_device_info)
- self._destroy_vdis(instance, vm_ref, block_device_info)
- self._destroy_kernel_ramdisk(instance, vm_ref)
+ # Destroy VDIs (if necessary)
+ if destroy_disks:
+ self._detach_vm_vols(instance, vm_ref, block_device_info)
+ self._destroy_vdis(instance, vm_ref, block_device_info)
+ self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
@@ -1125,7 +1173,7 @@ class VMOps(object):
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
@@ -1158,7 +1206,7 @@ class VMOps(object):
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
@@ -1170,7 +1218,7 @@ class VMOps(object):
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
@@ -1211,45 +1259,6 @@ class VMOps(object):
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
- def poll_rescued_instances(self, timeout):
- """Look for expirable rescued instances.
-
- - forcibly exit rescue mode for any instances that have been
- in rescue mode for >= the provided timeout
-
- """
- last_ran = self.poll_rescue_last_ran
- if not last_ran:
- # We need a base time to start tracking.
- self.poll_rescue_last_ran = timeutils.utcnow()
- return
-
- if not timeutils.is_older_than(last_ran, timeout):
- # Do not run. Let's bail.
- return
-
- # Update the time tracker and proceed.
- self.poll_rescue_last_ran = timeutils.utcnow()
-
- rescue_vms = []
- for instance in self.list_instances():
- if instance.endswith("-rescue"):
- rescue_vms.append(dict(name=instance,
- vm_ref=vm_utils.lookup(self._session,
- instance)))
-
- for vm in rescue_vms:
- rescue_vm_ref = vm["vm_ref"]
-
- original_name = vm["name"].split("-rescue", 1)[0]
- original_vm_ref = vm_utils.lookup(self._session, original_name)
-
- self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
-
- self._release_bootlock(original_vm_ref)
- self._session.call_xenapi("VM.start", original_vm_ref, False,
- False)
-
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
@@ -1319,7 +1328,7 @@ class VMOps(object):
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
- """convert a network info vif to injectable instance data"""
+ """convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
@@ -1518,15 +1527,15 @@ class VMOps(object):
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
- """ recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """ recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """ recreates security group rules for specified instance """
+ """recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -1538,8 +1547,8 @@ class VMOps(object):
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
- current_aggregate = db.aggregate_get_by_host(context, CONF.host,
- key=pool_states.POOL_FLAG)[0]
+ current_aggregate = self._virtapi.aggregate_get_by_host(
+ context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
@@ -1609,8 +1618,8 @@ class VMOps(object):
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
- """ Check if it is possible to execute live migration
- on the source side.
+ """Check if it's possible to execute live migration on the source side.
+
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
@@ -1629,14 +1638,14 @@ class VMOps(object):
'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
- """generate a vdi_map for _call_live_migrate_command """
+ """generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
- """unpack xapi specific parameters, and call a live migrate command"""
+ """unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
@@ -1673,3 +1682,24 @@ class VMOps(object):
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
+
+ def get_per_instance_usage(self):
+ """Get usage info about each active instance."""
+ usage = {}
+
+ def _is_active(vm_rec):
+ power_state = vm_rec['power_state'].lower()
+ return power_state in ['running', 'paused']
+
+ def _get_uuid(vm_rec):
+ other_config = vm_rec['other_config']
+ return other_config.get('nova_uuid', None)
+
+ for vm_ref, vm_rec in vm_utils.list_vms(self._session):
+ uuid = _get_uuid(vm_rec)
+
+ if _is_active(vm_rec) and uuid is not None:
+ memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
+ usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
+
+ return usage
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 99976e0e6..7921e3e87 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -22,17 +22,17 @@ and storage repositories
import re
import string
-from nova import config
-from nova import flags
+from nova import exception
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-CONF = config.CONF
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class StorageError(Exception):
- """To raise errors related to SR, VDI, PBD, and VBD commands"""
+ """To raise errors related to SR, VDI, PBD, and VBD commands."""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
@@ -68,8 +68,9 @@ def create_sr(session, label, params):
def introduce_sr(session, sr_uuid, label, params):
LOG.debug(_("introducing sr within volume_utils"))
- type = params['sr_type']
- del params['sr_type']
+ # If the sr_type is missing, we assume we are
+ # using the default iscsi back-end
+ type = params.pop('sr_type', 'iscsi')
LOG.debug(_('type is = %s') % type)
if 'name_description' in params:
desc = params['name_description']
@@ -167,7 +168,7 @@ def create_iscsi_storage(session, info, label, description):
def find_sr_from_vbd(session, vbd_ref):
- """Find the SR reference from the VBD reference"""
+ """Find the SR reference from the VBD reference."""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
@@ -202,7 +203,7 @@ def unplug_pbds(session, sr_ref):
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
- """Introduce VDI in the host"""
+ """Introduce VDI in the host."""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
@@ -284,18 +285,29 @@ def get_device_number(mountpoint):
return device_number
+def parse_sr_info(connection_data, description=''):
+ label = connection_data.pop('name_label',
+ 'tempSR-%s' % connection_data.get('volume_id'))
+ params = {}
+ if 'sr_uuid' not in connection_data:
+ params = parse_volume_info(connection_data)
+ # This magic label sounds a lot like 'False Disc' in leet-speak
+ uuid = "FA15E-D15C-" + str(params['id'])
+ else:
+ uuid = connection_data['sr_uuid']
+ for k in connection_data.get('introduce_sr_keys', {}):
+ params[k] = connection_data[k]
+ params['name_description'] = connection_data.get('name_description',
+ description)
+
+ return (uuid, label, params)
+
+
def parse_volume_info(connection_data):
"""
Parse device_path and mountpoint as they can be used by XenAPI.
In particular, the mountpoint (e.g. /dev/sdc) must be translated
into a numeric literal.
- FIXME(armando):
- As for device_path, currently cannot be used as it is,
- because it does not contain target information. As for interim
- solution, target details are passed either via Flags or obtained
- by iscsiadm. Long-term solution is to add a few more fields to the
- db in the iscsi_target table with the necessary info and modify
- the iscsi driver to set them.
"""
volume_id = connection_data['volume_id']
target_portal = connection_data['target_portal']
@@ -323,7 +335,7 @@ def parse_volume_info(connection_data):
def mountpoint_to_number(mountpoint):
- """Translate a mountpoint like /dev/sdc into a numeric"""
+ """Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
@@ -338,7 +350,7 @@ def mountpoint_to_number(mountpoint):
def _get_volume_id(path_or_id):
- """Retrieve the volume id from device_path"""
+ """Retrieve the volume id from device_path."""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
@@ -357,7 +369,7 @@ def _get_volume_id(path_or_id):
def _get_target_host(iscsi_string):
- """Retrieve target host"""
+ """Retrieve target host."""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
@@ -365,17 +377,33 @@ def _get_target_host(iscsi_string):
def _get_target_port(iscsi_string):
- """Retrieve target port"""
+ """Retrieve target port."""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
-def _get_iqn(iscsi_string, id):
- """Retrieve target IQN"""
- if iscsi_string:
- return iscsi_string
- elif iscsi_string is None or CONF.iqn_prefix:
- volume_id = _get_volume_id(id)
- return '%s:%s' % (CONF.iqn_prefix, volume_id)
+def introduce_sr_unless_present(session, sr_uuid, label, params):
+ LOG.debug(_("Introducing SR %s") % label)
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref:
+ LOG.debug(_('SR found in xapi database. No need to introduce'))
+ return sr_ref
+ sr_ref = introduce_sr(session, sr_uuid, label, params)
+
+ if sr_ref is None:
+ raise exception.NovaException(_('Could not introduce SR'))
+ return sr_ref
+
+
+def forget_sr_if_present(session, sr_uuid):
+ sr_ref = find_sr_by_uuid(session, sr_uuid)
+ if sr_ref is None:
+ LOG.debug(_('SR %s not found in the xapi database') % sr_uuid)
+ return
+ try:
+ forget_sr(session, sr_uuid)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise exception.NovaException(_('Could not forget SR'))
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index c44e38f92..5f79b6c3a 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -35,84 +35,16 @@ class VolumeOps(object):
def __init__(self, session):
self._session = session
- def create_volume_for_sm(self, volume, sr_uuid):
- LOG.debug("Creating volume for Storage Manager")
-
- sm_vol_rec = {}
- try:
- sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(_('Unable to get SR using uuid'))
- #Create VDI
- label = 'vol-' + volume['id']
- desc = 'xensm volume for ' + volume['id']
- # size presented to xenapi is in bytes, while euca api is in GB
- vdi_size = volume['size'] * 1024 * 1024 * 1024
- vdi_ref = vm_utils.create_vdi(self._session, sr_ref,
- None, label, desc,
- vdi_size, False)
- vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
- sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
- return sm_vol_rec
-
- def delete_volume_for_sm(self, vdi_uuid):
- vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
- if vdi_ref is None:
- raise exception.NovaException(_('Could not find VDI ref'))
-
- vm_utils.destroy_vdi(self._session, vdi_ref)
-
- def create_sr(self, label, params):
- LOG.debug(_("Creating SR %s") % label)
- sr_ref = volume_utils.create_sr(self._session, label, params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not create SR'))
- sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
- if sr_rec is None:
- raise exception.NovaException(_('Could not retrieve SR record'))
- return sr_rec['uuid']
-
- # Checks if sr has already been introduced to this host
- def introduce_sr(self, sr_uuid, label, params):
- LOG.debug(_("Introducing SR %s") % label)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- LOG.debug(_('SR found in xapi database. No need to introduce'))
- return sr_ref
- sr_ref = volume_utils.introduce_sr(self._session, sr_uuid, label,
- params)
- if sr_ref is None:
- raise exception.NovaException(_('Could not introduce SR'))
- return sr_ref
-
- def is_sr_on_host(self, sr_uuid):
- LOG.debug(_('Checking for SR %s') % sr_uuid)
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref:
- return True
- return False
-
- # Checks if sr has been introduced
- def forget_sr(self, sr_uuid):
- sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
- if sr_ref is None:
- LOG.INFO(_('SR %s not found in the xapi database') % sr_uuid)
- return
- try:
- volume_utils.forget_sr(self._session, sr_uuid)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise exception.NovaException(_('Could not forget SR'))
-
- def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach volume storage to VM instance"""
+ def attach_volume(self, connection_info, instance_name, mountpoint,
+ hotplug=True):
+ """Attach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
+
driver_type = connection_info['driver_volume_type']
if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
@@ -120,33 +52,23 @@ class VolumeOps(object):
connection_data = connection_info['data']
dev_number = volume_utils.get_device_number(mountpoint)
- if 'name_label' not in connection_data:
- label = 'tempSR-%s' % connection_data['volume_id']
- else:
- label = connection_data['name_label']
- del connection_data['name_label']
+ self._connect_volume(connection_data, dev_number, instance_name,
+ vm_ref, hotplug=hotplug)
- if 'name_description' not in connection_data:
- desc = 'Disk-for:%s' % instance_name
- else:
- desc = connection_data['name_description']
-
- LOG.debug(connection_info)
- sr_params = {}
- if u'sr_uuid' not in connection_data:
- sr_params = volume_utils.parse_volume_info(connection_data)
- uuid = "FA15E-D15C-" + str(sr_params['id'])
- sr_params['sr_type'] = 'iscsi'
- else:
- uuid = connection_data['sr_uuid']
- for k in connection_data['introduce_sr_keys']:
- sr_params[k] = connection_data[k]
+ LOG.info(_('Mountpoint %(mountpoint)s attached to'
+ ' instance %(instance_name)s') % locals())
+
+ def _connect_volume(self, connection_data, dev_number, instance_name,
+ vm_ref, hotplug=True):
- sr_params['name_description'] = desc
+ description = 'Disk-for:%s' % instance_name
+ uuid, label, sr_params = volume_utils.parse_sr_info(connection_data,
+ description)
# Introduce SR
try:
- sr_ref = self.introduce_sr(uuid, label, sr_params)
+ sr_ref = volume_utils.introduce_sr_unless_present(
+ self._session, uuid, label, sr_params)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -168,7 +90,7 @@ class VolumeOps(object):
vdi_uuid, target_lun)
except volume_utils.StorageError, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
@@ -178,30 +100,28 @@ class VolumeOps(object):
osvol=True)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
- self.forget_sr(uuid)
+ volume_utils.forget_sr_if_present(self._session, uuid)
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
- try:
- self._session.call_xenapi("VBD.plug", vbd_ref)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- self.forget_sr(uuid)
- raise Exception(_('Unable to attach volume to instance %s')
- % instance_name)
-
- LOG.info(_('Mountpoint %(mountpoint)s attached to'
- ' instance %(instance_name)s') % locals())
+ if hotplug:
+ try:
+ self._session.call_xenapi("VBD.plug", vbd_ref)
+ except self._session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ volume_utils.forget_sr_if_present(self._session, uuid)
+ raise Exception(_('Unable to attach volume to instance %s')
+ % instance_name)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.mountpoint_to_number(mountpoint)
+ device_number = volume_utils.get_device_number(mountpoint)
try:
vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
@@ -210,9 +130,7 @@ class VolumeOps(object):
raise Exception(_('Unable to locate volume %s') % mountpoint)
try:
- vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
- if vm_rec['power_state'] != 'Halted':
+ if not vm_utils._is_vm_shutdown(self._session, vm_ref):
vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
@@ -225,6 +143,7 @@ class VolumeOps(object):
# Forget SR only if no other volumes on this host are using it
try:
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
volume_utils.purge_sr(self._session, sr_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
index 29ef4f248..ffbd509fd 100644
--- a/nova/vnc/__init__.py
+++ b/nova/vnc/__init__.py
@@ -18,8 +18,6 @@
"""Module for VNC Proxying."""
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
@@ -34,7 +32,7 @@ vnc_opts = [
'"http://127.0.0.1:6081/console"'),
cfg.StrOpt('vncserver_listen',
default='127.0.0.1',
- help='Ip address on which instance vncserversshould listen'),
+ help='IP address on which instance vncservers should listen'),
cfg.StrOpt('vncserver_proxyclient_address',
default='127.0.0.1',
help='the address to which proxy clients '
@@ -47,5 +45,5 @@ vnc_opts = [
help='keymap for vnc'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(vnc_opts)
diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py
index e3a1595d1..9489b949a 100644
--- a/nova/vnc/xvp_proxy.py
+++ b/nova/vnc/xvp_proxy.py
@@ -26,10 +26,8 @@ import eventlet.green
import eventlet.greenio
import eventlet.wsgi
-from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import version
@@ -47,7 +45,7 @@ xvp_proxy_opts = [
help='Address that the XCP VNC proxy should bind to'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(xvp_proxy_opts)
@@ -176,7 +174,7 @@ class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
def get_wsgi_server():
LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"),
- version.version_string_with_vcs())
+ version.version_string_with_package())
return wsgi.Server("XCP VNC Proxy",
XCPVNCProxy(),
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index 1eedd199d..da54a5009 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -18,12 +18,21 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.volume import <foo>' elsewhere.
-import nova.config
-import nova.flags
+import nova.openstack.common.cfg
import nova.openstack.common.importutils
+_volume_opts = [
+ nova.openstack.common.cfg.StrOpt('volume_api_class',
+ default='nova.volume.cinder.API',
+ help='The full class name of the '
+ 'volume API class to use'),
+]
+
+nova.openstack.common.cfg.CONF.register_opts(_volume_opts)
+
def API():
importutils = nova.openstack.common.importutils
- cls = importutils.import_class(nova.config.CONF.volume_api_class)
+ volume_api_class = nova.openstack.common.cfg.CONF.volume_api_class
+ cls = importutils.import_class(volume_api_class)
return cls()
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 45a2b1693..fccdedac8 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -20,14 +20,15 @@
Handles all requests relating to volumes + cinder.
"""
+from copy import deepcopy
+import sys
+from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
-from nova import config
from nova.db import base
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -41,9 +42,15 @@ cinder_opts = [
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
+ cfg.StrOpt('os_region_name',
+ default=None,
+ help='region name of this node'),
+ cfg.IntOpt('cinder_http_retries',
+ default=3,
+ help='Number of cinderclient retries on failed http calls'),
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(cinder_opts)
LOG = logging.getLogger(__name__)
@@ -62,7 +69,16 @@ def cinderclient(context):
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
- url = sc.url_for(service_type=service_type,
+ # extract the region if set in configuration
+ if CONF.os_region_name:
+ attr = 'region'
+ filter_value = CONF.os_region_name
+ else:
+ attr = None
+ filter_value = None
+ url = sc.url_for(attr=attr,
+ filter_value=filter_value,
+ service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
@@ -71,7 +87,8 @@ def cinderclient(context):
c = cinder_client.Client(context.user_id,
context.auth_token,
project_id=context.project_id,
- auth_url=url)
+ auth_url=url,
+ retries=CONF.cinder_http_retries)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
@@ -117,6 +134,9 @@ def _untranslate_volume_summary_view(context, vol):
item['value'] = value
d['volume_metadata'].append(item)
+ if hasattr(vol, 'volume_image_metadata'):
+ d['volume_image_metadata'] = deepcopy(vol.volume_image_metadata)
+
return d
@@ -141,9 +161,24 @@ def _untranslate_snapshot_summary_view(context, snapshot):
class API(base.Base):
"""API for interacting with the volume manager."""
+ def _reraise_translated_volume_exception(self, volume_id):
+ """Transform the exception for the volume but keep its traceback
+ intact."""
+ exc_type, exc_value, exc_trace = sys.exc_info()
+ new_exc = self._translate_volume_exception(volume_id, exc_value)
+ raise new_exc, None, exc_trace
+
+ def _translate_volume_exception(self, volume_id, exc_value):
+ if isinstance(exc_value, cinder_exception.NotFound):
+ return exception.VolumeNotFound(volume_id=volume_id)
+ return exc_value
+
def get(self, context, volume_id):
- item = cinderclient(context).volumes.get(volume_id)
- return _untranslate_volume_summary_view(context, item)
+ try:
+ item = cinderclient(context).volumes.get(volume_id)
+ return _untranslate_volume_summary_view(context, item)
+ except Exception:
+ self._reraise_translated_volume_exception(volume_id)
def get_all(self, context, search_opts={}):
items = cinderclient(context).volumes.list(detailed=True)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
deleted file mode 100644
index 07bbbde6c..000000000
--- a/nova/volume/driver.py
+++ /dev/null
@@ -1,954 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Drivers for volumes.
-
-"""
-
-import os
-import tempfile
-import time
-import urllib
-
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.volume import iscsi
-
-
-LOG = logging.getLogger(__name__)
-
-volume_opts = [
- cfg.StrOpt('volume_group',
- default='nova-volumes',
- help='Name for the VG that will contain exported volumes'),
- cfg.IntOpt('num_shell_tries',
- default=3,
- help='number of times to attempt to run flakey shell commands'),
- cfg.IntOpt('num_iscsi_scan_tries',
- default=3,
- help='number of times to rescan iSCSI target to find volume'),
- cfg.IntOpt('iscsi_num_targets',
- default=100,
- help='Number of iscsi target ids per host'),
- cfg.StrOpt('iscsi_target_prefix',
- default='iqn.2010-10.org.openstack:',
- help='prefix for iscsi volumes'),
- cfg.StrOpt('iscsi_ip_address',
- default='$my_ip',
- help='use this ip for iscsi'),
- cfg.IntOpt('iscsi_port',
- default=3260,
- help='The port that the iSCSI daemon is listening on'),
- cfg.StrOpt('rbd_pool',
- default='rbd',
- help='the RADOS pool in which rbd volumes are stored'),
- cfg.StrOpt('rbd_user',
- default=None,
- help='the RADOS client name for accessing rbd volumes'),
- cfg.StrOpt('rbd_secret_uuid',
- default=None,
- help='the libvirt uuid of the secret for the rbd_user'
- 'volumes'),
- cfg.StrOpt('volume_tmp_dir',
- default=None,
- help='where to store temporary image files if the volume '
- 'driver does not write them directly to the volume'),
- ]
-
-CONF = config.CONF
-CONF.register_opts(volume_opts)
-
-
-class VolumeDriver(object):
- """Executes commands relating to Volumes."""
- def __init__(self, execute=utils.execute, *args, **kwargs):
- # NOTE(vish): db is set by Manager
- self.db = None
- self.set_execute(execute)
-
- def set_execute(self, execute):
- self._execute = execute
-
- def _try_execute(self, *command, **kwargs):
- # NOTE(vish): Volume commands can partially fail due to timing, but
- # running them a second time on failure will usually
- # recover nicely.
- tries = 0
- while True:
- try:
- self._execute(*command, **kwargs)
- return True
- except exception.ProcessExecutionError:
- tries = tries + 1
- if tries >= CONF.num_shell_tries:
- raise
- LOG.exception(_("Recovering from a failed execute. "
- "Try number %s"), tries)
- time.sleep(tries ** 2)
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- out, err = self._execute('vgs', '--noheadings', '-o', 'name',
- run_as_root=True)
- volume_groups = out.split()
- if not CONF.volume_group in volume_groups:
- exception_message = (_("volume group %s doesn't exist")
- % CONF.volume_group)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def _create_volume(self, volume_name, sizestr):
- self._try_execute('lvcreate', '-L', sizestr, '-n',
- volume_name, CONF.volume_group, run_as_root=True)
-
- def _copy_volume(self, srcstr, deststr, size_in_g):
- # Use O_DIRECT to avoid thrashing the system buffer cache
- direct_flags = ('iflag=direct', 'oflag=direct')
-
- # Check whether O_DIRECT is supported
- try:
- self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
- *direct_flags, run_as_root=True)
- except exception.ProcessExecutionError:
- direct_flags = ()
-
- # Perform the copy
- self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
- 'count=%d' % (size_in_g * 1024), 'bs=1M',
- *direct_flags, run_as_root=True)
-
- def _volume_not_present(self, volume_name):
- path_name = '%s/%s' % (CONF.volume_group, volume_name)
- try:
- self._try_execute('lvdisplay', path_name, run_as_root=True)
- except Exception as e:
- # If the volume isn't present
- return True
- return False
-
- def _delete_volume(self, volume, size_in_g):
- """Deletes a logical volume."""
- # zero out old volumes to prevent data leaking between users
- # TODO(ja): reclaiming space should be done lazy and low priority
- self._copy_volume('/dev/zero', self.local_path(volume), size_in_g)
- dev_path = self.local_path(volume)
- if os.path.exists(dev_path):
- self._try_execute('dmsetup', 'remove', '-f', dev_path,
- run_as_root=True)
- self._try_execute('lvremove', '-f', "%s/%s" %
- (CONF.volume_group,
- self._escape_snapshot(volume['name'])),
- run_as_root=True)
-
- def _sizestr(self, size_in_g):
- if int(size_in_g) == 0:
- return '100M'
- return '%sG' % size_in_g
-
- # Linux LVM reserves name that starts with snapshot, so that
- # such volume name can't be created. Mangle it.
- def _escape_snapshot(self, snapshot_name):
- if not snapshot_name.startswith('snapshot'):
- return snapshot_name
- return '_' + snapshot_name
-
- def create_volume(self, volume):
- """Creates a logical volume. Can optionally return a Dictionary of
- changes to the volume object to be persisted."""
- self._create_volume(volume['name'], self._sizestr(volume['size']))
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- self._create_volume(volume['name'], self._sizestr(volume['size']))
- self._copy_volume(self.local_path(snapshot), self.local_path(volume),
- snapshot['volume_size'])
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
- if self._volume_not_present(volume['name']):
- # If the volume isn't present, then don't attempt to delete
- return True
-
- # TODO(yamahata): lvm can't delete origin volume only without
- # deleting derived snapshots. Can we do something fancy?
- out, err = self._execute('lvdisplay', '--noheading',
- '-C', '-o', 'Attr',
- '%s/%s' % (CONF.volume_group,
- volume['name']),
- run_as_root=True)
- # fake_execute returns None resulting unit test error
- if out:
- out = out.strip()
- if (out[0] == 'o') or (out[0] == 'O'):
- raise exception.VolumeIsBusy(volume_name=volume['name'])
-
- self._delete_volume(volume, volume['size'])
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
- orig_lv_name = "%s/%s" % (CONF.volume_group, snapshot['volume_name'])
- self._try_execute('lvcreate', '-L',
- self._sizestr(snapshot['volume_size']),
- '--name', self._escape_snapshot(snapshot['name']),
- '--snapshot', orig_lv_name, run_as_root=True)
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
- if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
- # If the snapshot isn't present, then don't attempt to delete
- return True
-
- # TODO(yamahata): zeroing out the whole snapshot triggers COW.
- # it's quite slow.
- self._delete_volume(snapshot, snapshot['volume_size'])
-
- def local_path(self, volume):
- # NOTE(vish): stops deprecation warning
- escaped_group = CONF.volume_group.replace('-', '--')
- escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
- return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- raise NotImplementedError()
-
- def create_export(self, context, volume):
- """Exports the volume. Can optionally return a Dictionary of changes
- to the volume object to be persisted."""
- raise NotImplementedError()
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
- raise NotImplementedError()
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- raise NotImplementedError()
-
- def initialize_connection(self, volume, connector):
- """Allow connection to connector and return connection info."""
- raise NotImplementedError()
-
- def terminate_connection(self, volume, connector):
- """Disallow connection from connector"""
- raise NotImplementedError()
-
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
- """ Callback for volume attached to instance."""
- pass
-
- def detach_volume(self, context, volume_id):
- """ Callback for volume detached."""
- pass
-
- def get_volume_stats(self, refresh=False):
- """Return the current state of the volume service. If 'refresh' is
- True, run the update first."""
- return None
-
- def do_setup(self, context):
- """Any initialization the volume driver does while starting"""
- pass
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- raise NotImplementedError()
-
- def copy_volume_to_image(self, context, volume, image_service, image_id):
- """Copy the volume to the specified image."""
- raise NotImplementedError()
-
- def clone_image(self, volume, image_location):
- """Create a volume efficiently from an existing image.
-
- image_location is a string whose format depends on the
- image service backend in use. The driver should use it
- to determine whether cloning is possible.
-
- Returns a boolean indicating whether cloning occurred
- """
- return False
-
-
-class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes.
-
- We make use of model provider properties as follows:
-
- ``provider_location``
- if present, contains the iSCSI target information in the same
- format as an ietadm discovery
- i.e. '<ip>:<port>,<portal> <target IQN>'
-
- ``provider_auth``
- if present, contains a space-separated triple:
- '<auth method> <auth username> <auth password>'.
- `CHAP` is the only auth_method in use at the moment.
- """
-
- def __init__(self, *args, **kwargs):
- self.tgtadm = iscsi.get_target_admin()
- super(ISCSIDriver, self).__init__(*args, **kwargs)
-
- def set_execute(self, execute):
- super(ISCSIDriver, self).set_execute(execute)
- self.tgtadm.set_execute(execute)
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
- except exception.NotFound:
- LOG.info(_("Skipping ensure_export. No iscsi_target "
- "provisioned for volume: %s"), volume['id'])
- return
- else:
- iscsi_target = 1 # dummy value when using TgtAdm
-
- iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
- volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
-
- # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
- # should clean this all up at some point in the future
- self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
- 0, volume_path,
- check_exit_code=False)
-
- def _ensure_iscsi_targets(self, context, host):
- """Ensure that target ids have been created in datastore."""
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
- host)
- if host_iscsi_targets >= CONF.iscsi_num_targets:
- return
-
- # NOTE(vish): Target ids start at 1, not 0.
- for target_num in xrange(1, CONF.iscsi_num_targets + 1):
- target = {'host': host, 'target_num': target_num}
- self.db.iscsi_target_create_safe(context, target)
-
- def create_export(self, context, volume):
- """Creates an export for a logical volume."""
- #BOOKMARK(jdg)
-
- iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
- volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
-
- model_update = {}
-
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- lun = 0
- self._ensure_iscsi_targets(context, volume['host'])
- iscsi_target = self.db.volume_allocate_iscsi_target(context,
- volume['id'],
- volume['host'])
- else:
- lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1
- iscsi_target = 0 # NOTE(jdg): Not used by tgtadm
-
- # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
- # should clean this all up at some point in the future
- tid = self.tgtadm.create_iscsi_target(iscsi_name,
- iscsi_target,
- 0,
- volume_path)
- model_update['provider_location'] = _iscsi_location(
- CONF.iscsi_ip_address, tid, iscsi_name, lun)
- return model_update
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
-
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
- except exception.NotFound:
- LOG.info(_("Skipping remove_export. No iscsi_target "
- "provisioned for volume: %s"), volume['id'])
- return
- else:
- iscsi_target = 0
-
- try:
-
- # NOTE: provider_location may be unset if the volume hasn't
- # been exported
- location = volume['provider_location'].split(' ')
- iqn = location[1]
-
- # ietadm show will exit with an error
- # this export has already been removed
- self.tgtadm.show_target(iscsi_target, iqn=iqn)
- except Exception as e:
- LOG.info(_("Skipping remove_export. No iscsi_target "
- "is presently exported for volume: %s"), volume['id'])
- return
-
- self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
-
- def _do_iscsi_discovery(self, volume):
- #TODO(justinsb): Deprecate discovery and use stored info
- #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
- LOG.warn(_("ISCSI provider_location not stored, using discovery"))
-
- volume_name = volume['name']
-
- (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
- '-t', 'sendtargets', '-p', volume['host'],
- run_as_root=True)
- for target in out.splitlines():
- if CONF.iscsi_ip_address in target and volume_name in target:
- return target
- return None
-
- def _get_iscsi_properties(self, volume):
- """Gets iscsi configuration
-
- We ideally get saved information in the volume entity, but fall back
- to discovery if need be. Discovery may be completely removed in future
- The properties are:
-
- :target_discovered: boolean indicating whether discovery was used
-
- :target_iqn: the IQN of the iSCSI target
-
- :target_portal: the portal of the iSCSI target
-
- :target_lun: the lun of the iSCSI target
-
- :volume_id: the id of the volume (currently used by xen)
-
- :auth_method:, :auth_username:, :auth_password:
-
- the authentication details. Right now, either auth_method is not
- present meaning no authentication, or auth_method == `CHAP`
- meaning use CHAP with the specified credentials.
- """
-
- properties = {}
-
- location = volume['provider_location']
-
- if location:
- # provider_location is the same format as iSCSI discovery output
- properties['target_discovered'] = False
- else:
- location = self._do_iscsi_discovery(volume)
-
- if not location:
- raise exception.InvalidVolume(_("Could not find iSCSI export "
- " for volume %s") %
- (volume['name']))
-
- LOG.debug(_("ISCSI Discovery: Found %s") % (location))
- properties['target_discovered'] = True
-
- results = location.split(" ")
- properties['target_portal'] = results[0].split(",")[0]
- properties['target_iqn'] = results[1]
- try:
- properties['target_lun'] = int(results[2])
- except (IndexError, ValueError):
- if CONF.iscsi_helper == 'tgtadm':
- properties['target_lun'] = 1
- else:
- properties['target_lun'] = 0
-
- properties['volume_id'] = volume['id']
-
- auth = volume['provider_auth']
- if auth:
- (auth_method, auth_username, auth_secret) = auth.split()
-
- properties['auth_method'] = auth_method
- properties['auth_username'] = auth_username
- properties['auth_password'] = auth_secret
-
- return properties
-
- def _run_iscsiadm(self, iscsi_properties, iscsi_command):
- (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
- iscsi_properties['target_iqn'],
- '-p', iscsi_properties['target_portal'],
- *iscsi_command, run_as_root=True)
- LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
- (iscsi_command, out, err))
- return (out, err)
-
- def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
- iscsi_command = ('--op', 'update', '-n', property_key,
- '-v', property_value)
- return self._run_iscsiadm(iscsi_properties, iscsi_command)
-
- def initialize_connection(self, volume, connector):
- """Initializes the connection and returns connection info.
-
- The iscsi driver returns a driver_volume_type of 'iscsi'.
- The format of the driver data is defined in _get_iscsi_properties.
- Example return value::
-
- {
- 'driver_volume_type': 'iscsi'
- 'data': {
- 'target_discovered': True,
- 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
- 'target_portal': '127.0.0.0.1:3260',
- 'volume_id': 1,
- }
- }
-
- """
-
- iscsi_properties = self._get_iscsi_properties(volume)
- return {
- 'driver_volume_type': 'iscsi',
- 'data': iscsi_properties
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- vol_uuid_file = 'volume-%s' % volume_id
- volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
- if os.path.isfile(volume_path):
- iqn = '%s%s' % (CONF.iscsi_target_prefix,
- vol_uuid_file)
- else:
- raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)
-
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- tid = self.db.volume_get_iscsi_target_num(context, volume_id)
- else:
- tid = 0
-
- try:
- self.tgtadm.show_target(tid, iqn=iqn)
- except exception.ProcessExecutionError, e:
- # Instances remount read-only in this case.
- # /etc/init.d/iscsitarget restart and rebooting nova-volume
- # is better since ensure_export() works at boot time.
- LOG.error(_("Cannot confirm exported volume "
- "id:%(volume_id)s.") % locals())
- raise
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- volume_path = self.local_path(volume)
- with utils.temporary_chown(volume_path):
- with utils.file_open(volume_path, "wb") as image_file:
- image_service.download(context, image_id, image_file)
-
- def copy_volume_to_image(self, context, volume, image_service, image_id):
- """Copy the volume to the specified image."""
- volume_path = self.local_path(volume)
- with utils.temporary_chown(volume_path):
- with utils.file_open(volume_path) as volume_file:
- image_service.update(context, image_id, {}, volume_file)
-
-
-class FakeISCSIDriver(ISCSIDriver):
- """Logs calls instead of executing."""
- def __init__(self, *args, **kwargs):
- super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
- *args, **kwargs)
-
- def check_for_setup_error(self):
- """No setup necessary in fake mode."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'iscsi',
- 'data': {}
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- @staticmethod
- def fake_execute(cmd, *_args, **_kwargs):
- """Execute that simply logs the command."""
- LOG.debug(_("FAKE ISCSI: %s"), cmd)
- return (None, None)
-
-
-class RBDDriver(VolumeDriver):
- """Implements RADOS block device (RBD) volume commands"""
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- (stdout, stderr) = self._execute('rados', 'lspools')
- pools = stdout.split("\n")
- if not CONF.rbd_pool in pools:
- exception_message = (_("rbd has no pool %s") %
- CONF.rbd_pool)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def _supports_layering(self):
- stdout, _ = self._execute('rbd', '--help')
- return 'clone' in stdout
-
- def create_volume(self, volume):
- """Creates a logical volume."""
- if int(volume['size']) == 0:
- size = 100
- else:
- size = int(volume['size']) * 1024
- args = ['rbd', 'create',
- '--pool', CONF.rbd_pool,
- '--size', size,
- volume['name']]
- if self._supports_layering():
- args += ['--new-format']
- self._try_execute(*args)
-
- def _clone(self, volume, src_pool, src_image, src_snap):
- self._try_execute('rbd', 'clone',
- '--pool', src_pool,
- '--image', src_image,
- '--snap', src_snap,
- '--dest-pool', CONF.rbd_pool,
- '--dest', volume['name'])
-
- def _resize(self, volume):
- size = int(volume['size']) * 1024
- self._try_execute('rbd', 'resize',
- '--pool', CONF.rbd_pool,
- '--image', volume['name'],
- '--size', size)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- self._clone(volume, CONF.rbd_pool,
- snapshot['volume_name'], snapshot['name'])
- if int(volume['size']):
- self._resize(volume)
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
- stdout, _ = self._execute('rbd', 'snap', 'ls',
- '--pool', CONF.rbd_pool,
- volume['name'])
- if stdout.count('\n') > 1:
- raise exception.VolumeIsBusy(volume_name=volume['name'])
- self._try_execute('rbd', 'rm',
- '--pool', CONF.rbd_pool,
- volume['name'])
-
- def create_snapshot(self, snapshot):
- """Creates an rbd snapshot"""
- self._try_execute('rbd', 'snap', 'create',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
- if self._supports_layering():
- self._try_execute('rbd', 'snap', 'protect',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
-
- def delete_snapshot(self, snapshot):
- """Deletes an rbd snapshot"""
- if self._supports_layering():
- try:
- self._try_execute('rbd', 'snap', 'unprotect',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
- except exception.ProcessExecutionError:
- raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
- self._try_execute('rbd', 'snap', 'rm',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
-
- def local_path(self, volume):
- """Returns the path of the rbd volume."""
- # This is the same as the remote path
- # since qemu accesses it directly.
- return "rbd:%s/%s" % (CONF.rbd_pool, volume['name'])
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- pass
-
- def create_export(self, context, volume):
- """Exports the volume"""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'rbd',
- 'data': {
- 'name': '%s/%s' % (CONF.rbd_pool, volume['name']),
- 'auth_enabled': CONF.rbd_secret_uuid is not None,
- 'auth_username': CONF.rbd_user,
- 'secret_type': 'ceph',
- 'secret_uuid': CONF.rbd_secret_uuid,
- }
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- def _parse_location(self, location):
- prefix = 'rbd://'
- if not location.startswith(prefix):
- reason = _('Image %s is not stored in rbd') % location
- raise exception.ImageUnacceptable(reason)
- pieces = map(urllib.unquote, location[len(prefix):].split('/'))
- if any(map(lambda p: p == '', pieces)):
- reason = _('Image %s has blank components') % location
- raise exception.ImageUnacceptable(reason)
- if len(pieces) != 4:
- reason = _('Image %s is not an rbd snapshot') % location
- raise exception.ImageUnacceptable(reason)
- return pieces
-
- def _get_fsid(self):
- stdout, _ = self._execute('ceph', 'fsid')
- return stdout.rstrip('\n')
-
- def _is_cloneable(self, image_location):
- try:
- fsid, pool, image, snapshot = self._parse_location(image_location)
- except exception.ImageUnacceptable:
- return False
-
- if self._get_fsid() != fsid:
- reason = _('%s is in a different ceph cluster') % image_location
- LOG.debug(reason)
- return False
-
- # check that we can read the image
- try:
- self._execute('rbd', 'info',
- '--pool', pool,
- '--image', image,
- '--snap', snapshot)
- except exception.ProcessExecutionError:
- LOG.debug(_('Unable to read image %s') % image_location)
- return False
-
- return True
-
- def clone_image(self, volume, image_location):
- if image_location is None or not self._is_cloneable(image_location):
- return False
- _, pool, image, snapshot = self._parse_location(image_location)
- self._clone(volume, pool, image, snapshot)
- self._resize(volume)
- return True
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- # TODO(jdurgin): replace with librbd
- # this is a temporary hack, since rewriting this driver
- # to use librbd would take too long
- if CONF.volume_tmp_dir and not os.exists(CONF.volume_tmp_dir):
- os.makedirs(CONF.volume_tmp_dir)
-
- with tempfile.NamedTemporaryFile(dir=CONF.volume_tmp_dir) as tmp:
- image_service.download(context, image_id, tmp)
- # import creates the image, so we must remove it first
- self._try_execute('rbd', 'rm',
- '--pool', CONF.rbd_pool,
- volume['name'])
- self._try_execute('rbd', 'import',
- '--pool', CONF.rbd_pool,
- tmp.name, volume['name'])
-
-
-class SheepdogDriver(VolumeDriver):
- """Executes commands relating to Sheepdog Volumes"""
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- try:
- #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
- # gives short output, but for compatibility reason we won't
- # use it and just check if 'running' is in the output.
- (out, err) = self._execute('collie', 'cluster', 'info')
- if not 'running' in out.split():
- exception_message = _("Sheepdog is not working: %s") % out
- raise exception.VolumeBackendAPIException(
- data=exception_message)
-
- except exception.ProcessExecutionError:
- exception_message = _("Sheepdog is not working")
- raise exception.NovaException(data=exception_message)
-
- def create_volume(self, volume):
- """Creates a sheepdog volume"""
- self._try_execute('qemu-img', 'create',
- "sheepdog:%s" % volume['name'],
- self._sizestr(volume['size']))
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a sheepdog volume from a snapshot."""
- self._try_execute('qemu-img', 'create', '-b',
- "sheepdog:%s:%s" % (snapshot['volume_name'],
- snapshot['name']),
- "sheepdog:%s" % volume['name'])
-
- def delete_volume(self, volume):
- """Deletes a logical volume"""
- self._try_execute('collie', 'vdi', 'delete', volume['name'])
-
- def create_snapshot(self, snapshot):
- """Creates a sheepdog snapshot"""
- self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
- "sheepdog:%s" % snapshot['volume_name'])
-
- def delete_snapshot(self, snapshot):
- """Deletes a sheepdog snapshot"""
- self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
- '-s', snapshot['name'])
-
- def local_path(self, volume):
- return "sheepdog:%s" % volume['name']
-
- def ensure_export(self, context, volume):
- """Safely and synchronously recreates an export for a logical volume"""
- pass
-
- def create_export(self, context, volume):
- """Exports the volume"""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'sheepdog',
- 'data': {
- 'name': volume['name']
- }
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
-
-class LoggingVolumeDriver(VolumeDriver):
- """Logs and records calls, for unit tests."""
-
- def check_for_setup_error(self):
- pass
-
- def create_volume(self, volume):
- self.log_action('create_volume', volume)
-
- def delete_volume(self, volume):
- self.log_action('delete_volume', volume)
-
- def local_path(self, volume):
- print "local_path not implemented"
- raise NotImplementedError()
-
- def ensure_export(self, context, volume):
- self.log_action('ensure_export', volume)
-
- def create_export(self, context, volume):
- self.log_action('create_export', volume)
-
- def remove_export(self, context, volume):
- self.log_action('remove_export', volume)
-
- def initialize_connection(self, volume, connector):
- self.log_action('initialize_connection', volume)
-
- def terminate_connection(self, volume, connector):
- self.log_action('terminate_connection', volume)
-
- def check_for_export(self, context, volume_id):
- self.log_action('check_for_export', volume_id)
-
- _LOGS = []
-
- @staticmethod
- def clear_logs():
- LoggingVolumeDriver._LOGS = []
-
- @staticmethod
- def log_action(action, parameters):
- """Logs the command."""
- LOG.debug(_("LoggingVolumeDriver: %s") % (action))
- log_dictionary = {}
- if parameters:
- log_dictionary = dict(parameters)
- log_dictionary['action'] = action
- LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
- LoggingVolumeDriver._LOGS.append(log_dictionary)
-
- @staticmethod
- def all_logs():
- return LoggingVolumeDriver._LOGS
-
- @staticmethod
- def logs_like(action, **kwargs):
- matches = []
- for entry in LoggingVolumeDriver._LOGS:
- if entry['action'] != action:
- continue
- match = True
- for k, v in kwargs.iteritems():
- if entry.get(k) != v:
- match = False
- break
- if match:
- matches.append(entry)
- return matches
-
-
-def _iscsi_location(ip, target, iqn, lun=None):
- return "%s:%s,%s %s %s" % (ip, CONF.iscsi_port, target, iqn, lun)
diff --git a/nova/volume/iscsi.py b/nova/volume/iscsi.py
deleted file mode 100644
index ce2776920..000000000
--- a/nova/volume/iscsi.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Helper code for the iSCSI volume driver.
-
-"""
-import os
-
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import log as logging
-from nova import utils
-
-LOG = logging.getLogger(__name__)
-
-iscsi_helper_opt = [
- cfg.StrOpt('iscsi_helper',
- default='tgtadm',
- help='iscsi target user-land tool to use'),
- cfg.StrOpt('volumes_dir',
- default='$state_path/volumes',
- help='Volume configuration file storage directory'),
-]
-
-CONF = config.CONF
-CONF.register_opts(iscsi_helper_opt)
-
-
-class TargetAdmin(object):
- """iSCSI target administration.
-
- Base class for iSCSI target admin helpers.
- """
-
- def __init__(self, cmd, execute):
- self._cmd = cmd
- self.set_execute(execute)
-
- def set_execute(self, execute):
- """Set the function to be used to execute commands."""
- self._execute = execute
-
- def _run(self, *args, **kwargs):
- self._execute(self._cmd, *args, run_as_root=True, **kwargs)
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- """Create a iSCSI target and logical unit"""
- raise NotImplementedError()
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- """Remove a iSCSI target and logical unit"""
- raise NotImplementedError()
-
- def _new_target(self, name, tid, **kwargs):
- """Create a new iSCSI target."""
- raise NotImplementedError()
-
- def _delete_target(self, tid, **kwargs):
- """Delete a target."""
- raise NotImplementedError()
-
- def show_target(self, tid, iqn=None, **kwargs):
- """Query the given target ID."""
- raise NotImplementedError()
-
- def _new_logicalunit(self, tid, lun, path, **kwargs):
- """Create a new LUN on a target using the supplied path."""
- raise NotImplementedError()
-
- def _delete_logicalunit(self, tid, lun, **kwargs):
- """Delete a logical unit from a target."""
- raise NotImplementedError()
-
-
-class TgtAdm(TargetAdmin):
- """iSCSI target administration using tgtadm."""
-
- def __init__(self, execute=utils.execute):
- super(TgtAdm, self).__init__('tgtadm', execute)
-
- def _get_target(self, iqn):
- (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
- lines = out.split('\n')
- for line in lines:
- if iqn in line:
- parsed = line.split()
- tid = parsed[1]
- return tid[:-1]
-
- return None
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- # Note(jdg) tid and lun aren't used by TgtAdm but remain for
- # compatibility
-
- fileutils.ensure_tree(CONF.volumes_dir)
-
- vol_id = name.split(':')[1]
- volume_conf = """
- <target %s>
- backing-store %s
- </target>
- """ % (name, path)
-
- LOG.info(_('Creating volume: %s') % vol_id)
- volumes_dir = CONF.volumes_dir
- volume_path = os.path.join(volumes_dir, vol_id)
-
- f = open(volume_path, 'w+')
- f.write(volume_conf)
- f.close()
-
- try:
- (out, err) = self._execute('tgt-admin',
- '--update',
- name,
- run_as_root=True)
- except exception.ProcessExecutionError, e:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(vol_id)s.") % locals())
-
- #Don't forget to remove the persistent file we created
- os.unlink(volume_path)
- raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
-
- iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
- tid = self._get_target(iqn)
- if tid is None:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(vol_id)s. Please ensure your tgtd config file "
- "contains 'include %(volumes_dir)s/*'") % locals())
- raise exception.NotFound()
-
- return tid
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
- vol_uuid_file = 'volume-%s' % vol_id
- volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
- if os.path.isfile(volume_path):
- iqn = '%s%s' % (CONF.iscsi_target_prefix,
- vol_uuid_file)
- else:
- raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
- try:
- self._execute('tgt-admin',
- '--delete',
- iqn,
- run_as_root=True)
- except exception.ProcessExecutionError, e:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(volume_id)s.") % locals())
- raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
-
- os.unlink(volume_path)
-
- def show_target(self, tid, iqn=None, **kwargs):
- if iqn is None:
- raise exception.InvalidParameterValue(
- err=_('valid iqn needed for show_target'))
-
- tid = self._get_target(iqn)
- if tid is None:
- raise exception.NotFound()
-
-
-class IetAdm(TargetAdmin):
- """iSCSI target administration using ietadm."""
-
- def __init__(self, execute=utils.execute):
- super(IetAdm, self).__init__('ietadm', execute)
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- self._new_target(name, tid, **kwargs)
- self._new_logicalunit(tid, lun, path, **kwargs)
- return tid
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
- self._delete_logicalunit(tid, lun, **kwargs)
- self._delete_target(tid, **kwargs)
-
- def _new_target(self, name, tid, **kwargs):
- self._run('--op', 'new',
- '--tid=%s' % tid,
- '--params', 'Name=%s' % name,
- **kwargs)
-
- def _delete_target(self, tid, **kwargs):
- self._run('--op', 'delete',
- '--tid=%s' % tid,
- **kwargs)
-
- def show_target(self, tid, iqn=None, **kwargs):
- self._run('--op', 'show',
- '--tid=%s' % tid,
- **kwargs)
-
- def _new_logicalunit(self, tid, lun, path, **kwargs):
- self._run('--op', 'new',
- '--tid=%s' % tid,
- '--lun=%d' % lun,
- '--params', 'Path=%s,Type=fileio' % path,
- **kwargs)
-
- def _delete_logicalunit(self, tid, lun, **kwargs):
- self._run('--op', 'delete',
- '--tid=%s' % tid,
- '--lun=%d' % lun,
- **kwargs)
-
-
-def get_target_admin():
- if CONF.iscsi_helper == 'tgtadm':
- return TgtAdm()
- else:
- return IetAdm()
diff --git a/nova/weights.py b/nova/weights.py
new file mode 100644
index 000000000..981171b3e
--- /dev/null
+++ b/nova/weights.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Pluggable Weighing support
+"""
+
+from nova import loadables
+
+
+class WeighedObject(object):
+ """Object with weight information."""
+ def __init__(self, obj, weight):
+ self.obj = obj
+ self.weight = weight
+
+ def __repr__(self):
+ return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
+
+
+class BaseWeigher(object):
+ """Base class for pluggable weighers."""
+ def _weight_multiplier(self):
+ """How weighted this weigher should be. Normally this would
+ be overriden in a subclass based on a config value.
+ """
+ return 1.0
+
+ def _weigh_object(self, obj, weight_properties):
+ """Override in a subclass to specify a weight for a specific
+ object.
+ """
+ return 0.0
+
+ def weigh_objects(self, weighed_obj_list, weight_properties):
+ """Weigh multiple objects. Override in a subclass if you need
+ need access to all objects in order to manipulate weights.
+ """
+ for obj in weighed_obj_list:
+ obj.weight += (self._weight_multiplier() *
+ self._weigh_object(obj.obj, weight_properties))
+
+
+class BaseWeightHandler(loadables.BaseLoader):
+ object_class = WeighedObject
+
+ def get_weighed_objects(self, weigher_classes, obj_list,
+ weighing_properties):
+ """Return a sorted (highest score first) list of WeighedObjects."""
+
+ if not obj_list:
+ return []
+
+ weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
+ for weigher_cls in weigher_classes:
+ weigher = weigher_cls()
+ weigher.weigh_objects(weighed_objs, weighing_properties)
+
+ return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b8880dfde..16851dba8 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -20,6 +20,7 @@
"""Utility methods for working with WSGI servers."""
import os.path
+import socket
import sys
import eventlet
@@ -30,13 +31,14 @@ import routes.middleware
import webob.dec
import webob.exc
-from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
wsgi_opts = [
+ cfg.StrOpt('api_paste_config',
+ default="api-paste.ini",
+ help='File name for the paste.deploy config for nova-api'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
' len: %(body_length)s time: %(wall_seconds).7f',
@@ -45,7 +47,7 @@ wsgi_opts = [
'into it: client_ip, date_time, request_line, status_code, '
'body_length, wall_seconds.')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
LOG = logging.getLogger(__name__)
@@ -81,8 +83,22 @@ class Server(object):
raise exception.InvalidInput(
reason='The backlog must be more than 1')
- self._socket = eventlet.listen((host, port), backlog=backlog)
- (self.host, self.port) = self._socket.getsockname()
+ bind_addr = (host, port)
+ # TODO(dims): eventlet's green dns/socket module does not actually
+ # support IPv6 in getaddrinfo(). We need to get around this in the
+ # future or monitor upstream for a fix
+ try:
+ info = socket.getaddrinfo(bind_addr[0],
+ bind_addr[1],
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0]
+ family = info[0]
+ bind_addr = info[-1]
+ except Exception:
+ family = socket.AF_INET
+
+ self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
+ (self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
def start(self):
diff --git a/openstack-common.conf b/openstack-common.conf
index 666fb059e..ea33ab235 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
+modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
index dcbee3ded..0f88e52bb 100644
--- a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
@@ -22,7 +22,7 @@ import subprocess
def execute_get_output(*command):
- """Execute and return stdout"""
+ """Execute and return stdout."""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
@@ -32,7 +32,7 @@ def execute_get_output(*command):
def execute(*command):
- """Execute without returning stdout"""
+ """Execute without returning stdout."""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
index 5cfd32dbd..be873a7e8 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -38,7 +38,7 @@ pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
- """Errors that occur when calling xenstore-* through subprocesses"""
+ """Errors that occur when calling xenstore-* through subprocesses."""
def __init__(self, cmd, return_code, stderr, stdout):
msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
diff --git a/run_tests.sh b/run_tests.sh
index dd1145d2c..1a54c1bef 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,7 +11,6 @@ function usage {
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
- echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
@@ -33,14 +32,12 @@ function process_option {
-s|--no-site-packages) no_site_packages=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
- -m|--patch-migrate) patch_migrate=1;;
- -w|--no-patch-migrate) patch_migrate=0;;
-f|--force) force=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
- -*) noseopts="$noseopts $1";;
- *) noseargs="$noseargs $1"
+ -*) testropts="$testropts $1";;
+ *) testrargs="$testrargs $1"
esac
}
@@ -51,68 +48,62 @@ never_venv=0
force=0
no_site_packages=0
installvenvopts=
-noseargs=
-noseopts=
+testrargs=
+testropts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
recreate_db=1
-patch_migrate=1
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=0.05
-export NOSE_OPENSTACK_YELLOW=0.025
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
-export LANG=en_US.UTF-8
-export LANGUAGE=en_US:en
-export LC_ALL=C
+LANG=en_US.UTF-8
+LANGUAGE=en_US:en
+LC_ALL=C
for arg in "$@"; do
process_option $arg
done
-# If enabled, tell nose to collect coverage data
-if [ $coverage -eq 1 ]; then
- noseopts="$noseopts --with-coverage --cover-package=nova"
-fi
-
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
+function init_testr {
+ if [ ! -d .testrepository ]; then
+ ${wrapper} testr init
+ fi
+}
+
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
- # Just run the test suites in current environment
- ${wrapper} $NOSETESTS | tee nosetests.log
- # If we get some short import error right away, print the error log directly
- RESULT=$?
- if [ "$RESULT" -ne "0" ];
- then
- ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
- if [ "$ERRSIZE" -lt "40" ];
- then
- cat run_tests.log
- fi
- else
- tests_run=$(awk '/^Ran/ {print $2}' nosetests.log)
- if [ -z "$tests_run" ] || [ "$tests_run" -eq 0 ];
- then
- echo "ERROR: Zero tests ran, something is wrong!"
- echo "This is usually caused by a parse error in some python"
- echo "file or a failure to set up the environment (i.e. during"
- echo "temporary database preparation). Running nosetests directly"
- echo "may offer more clues."
- return 1
+
+ if [ $coverage -eq 1 ]; then
+ # Do not test test_coverage_ext when gathering coverage.
+ if [ "x$testrargs" = "x" ]; then
+ testrargs="^(?!.*test_coverage_ext).*$"
fi
+ export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
fi
+ # Just run the test suites in current environment
+ set +e
+ TESTRTESTS="$TESTRTESTS $testrargs"
+ echo "Running \`${wrapper} $TESTRTESTS\`"
+ ${wrapper} $TESTRTESTS
+ RESULT=$?
+ set -e
+
+ copy_subunit_log
+
return $RESULT
}
+function copy_subunit_log {
+ LOGNAME=`cat .testrepository/next-stream`
+ LOGNAME=$(($LOGNAME - 1))
+ LOGNAME=".testrepository/${LOGNAME}"
+ cp $LOGNAME subunit.log
+}
function run_pep8 {
echo "Running PEP8 and HACKING compliance check..."
@@ -125,22 +116,28 @@ function run_pep8 {
srcfiles=`find nova -type f -name "*.py" ! -wholename "nova\/openstack*"`
srcfiles+=" `find bin -type f ! -name "nova.conf*" ! -name "*api-paste.ini*"`"
srcfiles+=" `find tools -type f -name "*.py"`"
+ srcfiles+=" `find plugins -type f -name "*.py"`"
+ srcfiles+=" `find smoketests -type f -name "*.py"`"
srcfiles+=" setup.py"
# Until all these issues get fixed, ignore.
- ignore='--ignore=N4,E12,E711,E721'
+ ignore='--ignore=E12,E711,E721,E712'
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
+ # NOTE(sdague): as of grizzly-2 these are passing however leaving the comment
+ # in here in case we need to break it out when we get more of our hacking working
+ # again.
+ #
# NOTE(sirp): Dom0 plugins are written for Python 2.4, meaning some HACKING
# checks are too strict.
- pep8onlyfiles=`find plugins -type f -name "*.py"`
- pep8onlyfiles+=" `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`"
- ${wrapper} pep8 ${ignore} ${pep8onlyfiles}
+ # pep8onlyfiles=`find plugins -type f -name "*.py"`
+ # pep8onlyfiles+=" `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`"
+ # ${wrapper} pep8 ${ignore} ${pep8onlyfiles}
}
-NOSETESTS="nosetests $noseopts $noseargs"
+TESTRTESTS="testr run --parallel $testropts"
if [ $never_venv -eq 0 ]
then
@@ -182,13 +179,14 @@ if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
+init_testr
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
-# distinguish between options (noseopts), which begin with a '-', and
-# arguments (noseargs).
-if [ -z "$noseargs" ]; then
+# distinguish between options (testropts), which begin with a '-', and
+# arguments (testrargs).
+if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
@@ -197,5 +195,6 @@ fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
+ ${wrapper} coverage combine
${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
fi
diff --git a/setup.cfg b/setup.cfg
index 07a80bb68..a4932f63b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,10 +21,3 @@ input_file = nova/locale/nova.pot
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = nova/locale/nova.pot
-
-[nosetests]
-verbosity=2
-cover-package = nova
-cover-html = true
-cover-erase = true
-where=nova/tests
diff --git a/setup.py b/setup.py
index 2d6201a12..12de5c4d6 100644
--- a/setup.py
+++ b/setup.py
@@ -49,10 +49,14 @@ setuptools.setup(name='nova',
'bin/nova-api-ec2',
'bin/nova-api-metadata',
'bin/nova-api-os-compute',
+ 'bin/nova-baremetal-deploy-helper',
+ 'bin/nova-baremetal-manage',
'bin/nova-rpc-zmq-receiver',
+ 'bin/nova-cells',
'bin/nova-cert',
'bin/nova-clear-rabbit-queues',
'bin/nova-compute',
+ 'bin/nova-conductor',
'bin/nova-console',
'bin/nova-consoleauth',
'bin/nova-dhcpbridge',
diff --git a/smoketests/base.py b/smoketests/base.py
index 93f78f5dc..f6cec3168 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -63,7 +63,7 @@ class SmokeTestCase(unittest.TestCase):
return status == 0
def wait_for_running(self, instance, tries=60, wait=1):
- """Wait for instance to be running"""
+ """Wait for instance to be running."""
for x in xrange(tries):
instance.update()
if instance.state.startswith('running'):
@@ -72,18 +72,20 @@ class SmokeTestCase(unittest.TestCase):
else:
return False
- def wait_for_not_running(self, instance, tries=60, wait=1):
- """Wait for instance to not be running"""
+ def wait_for_deleted(self, instance, tries=60, wait=1):
+ """Wait for instance to be deleted."""
for x in xrange(tries):
- instance.update()
- if not instance.state.startswith('running'):
+ try:
+ #NOTE(dprince): raises exception when instance id disappears
+ instance.update(validate=True)
+ except ValueError:
return True
time.sleep(wait)
else:
return False
def wait_for_ping(self, ip, command="ping", tries=120):
- """Wait for ip to be pingable"""
+ """Wait for ip to be pingable."""
for x in xrange(tries):
if self.can_ping(ip, command):
return True
@@ -91,7 +93,7 @@ class SmokeTestCase(unittest.TestCase):
return False
def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
- """Wait for ip to be sshable"""
+ """Wait for ip to be sshable."""
for x in xrange(tries):
try:
conn = self.connect_ssh(ip, key_name)
@@ -139,9 +141,7 @@ class SmokeTestCase(unittest.TestCase):
**kwargs)
def split_clc_url(self, clc_url):
- """
- Splits a cloud controller endpoint url.
- """
+ """Splits a cloud controller endpoint url."""
parts = httplib.urlsplit(clc_url)
is_secure = parts.scheme == 'https'
ip, port = parts.netloc.split(':')
diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py
index 4215f705d..72f892568 100644
--- a/smoketests/test_netadmin.py
+++ b/smoketests/test_netadmin.py
@@ -196,7 +196,7 @@ class SecurityGroupTests(base.UserSmokeTestCase):
self.conn.disassociate_address(self.data['public_ip'])
self.conn.delete_key_pair(TEST_KEY)
self.conn.terminate_instances([self.data['instance'].id])
- self.wait_for_not_running(self.data['instance'])
+ self.wait_for_deleted(self.data['instance'])
self.conn.delete_security_group(TEST_GROUP)
groups = self.conn.get_all_security_groups()
self.assertFalse(TEST_GROUP in [group.name for group in groups])
diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py
index d6491c9d4..b05f0ac4b 100644
--- a/smoketests/test_sysadmin.py
+++ b/smoketests/test_sysadmin.py
@@ -249,12 +249,24 @@ class VolumeTests(base.UserSmokeTestCase):
self.assertTrue(volume.status.startswith('in-use'))
- # Give instance time to recognize volume.
- time.sleep(5)
-
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
+
+ # NOTE(dprince): give some time for volume to show up in partitions
+ stdin, stdout, stderr = conn.exec_command(
+ 'COUNT="0";'
+ 'until cat /proc/partitions | grep "%s\\$"; do '
+ '[ "$COUNT" -eq "60" ] && exit 1;'
+ 'COUNT=$(( $COUNT + 1 ));'
+ 'sleep 1; '
+ 'done'
+ % self.device.rpartition('/')[2])
+ out = stdout.read()
+ if not out.strip().endswith(self.device.rpartition('/')[2]):
+ self.fail('Timeout waiting for volume partition in instance. %s %s'
+ % (out, stderr.read()))
+
# NOTE(vish): this will create a dev for images that don't have
# udev rules
stdin, stdout, stderr = conn.exec_command(
diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py
index 836e48578..3185cb93d 100644
--- a/tools/conf/extract_opts.py
+++ b/tools/conf/extract_opts.py
@@ -39,7 +39,6 @@ OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
-OPTION_HELP_INDENT = "####"
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
@@ -47,10 +46,6 @@ WORDWRAP_WIDTH = 60
def main(srcfiles):
- print '\n'.join(['#' * 20, '# nova.conf sample #', '#' * 20,
- '', '[DEFAULT]', ''])
- _list_opts(cfg.CommonConfigOpts,
- cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__)
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
@@ -63,31 +58,94 @@ def main(srcfiles):
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
+
+ # opts_by_group is a mapping of group name to an options list
+ # The options list is a list of (module, options) tuples
+ opts_by_group = {'DEFAULT': []}
+
+ opts_by_group['DEFAULT'].append(
+ (cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__,
+ _list_opts(cfg.CommonConfigOpts)[0][1]))
+
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
- _print_module(mod_str)
+ if mod_str.endswith('.__init__'):
+ mod_str = mod_str[:mod_str.rfind(".")]
+
+ mod_obj = _import_module(mod_str)
+ if not mod_obj:
+ continue
+
+ for group, opts in _list_opts(mod_obj):
+ opts_by_group.setdefault(group, []).append((mod_str, opts))
+
+ print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
+ for group, opts in opts_by_group.items():
+ print_group_opts(group, opts)
+
print "# Total option count: %d" % OPTION_COUNT
-def _print_module(mod_str):
- mod_obj = None
- if mod_str.endswith('.__init__'):
- mod_str = mod_str[:mod_str.rfind(".")]
+def _import_module(mod_str):
try:
- mod_obj = importutils.import_module(mod_str)
+ return importutils.import_module(mod_str)
except (ValueError, AttributeError), err:
- return
+ return None
except ImportError, ie:
sys.stderr.write("%s\n" % str(ie))
- return
+ return None
except Exception, e:
- return
- _list_opts(mod_obj, mod_str)
+ return None
+
+
+def _guess_groups(opt, mod_obj):
+ groups = []
+
+ # is it in the DEFAULT group?
+ if (opt.dest in cfg.CONF and
+ not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)):
+ groups.append('DEFAULT')
+ # what other groups is it in?
+ for key, value in cfg.CONF.items():
+ if not isinstance(value, cfg.CONF.GroupAttr):
+ continue
+ if opt.dest not in value:
+ continue
+ groups.append(key)
-def _list_opts(obj, name):
+ if len(groups) == 1:
+ return groups[0]
+
+ group = None
+ for g in groups:
+ if g in mod_obj.__name__:
+ group = g
+ break
+
+ if group is None and 'DEFAULT' in groups:
+ sys.stderr.write("Guessing that " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in DEFAULT group out of " +
+ ','.join(groups) + "\n")
+ return 'DEFAULT'
+
+ if group is None:
+ sys.stderr("Unable to guess what group " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in out of " + ','.join(groups) + "\n")
+ sys.exit(1)
+
+ sys.stderr.write("Guessing that " + opt.dest +
+ " in " + mod_obj.__name__ +
+ " is in the " + group +
+ " group out of " + ','.join(groups) + "\n")
+ return group
+
+
+def _list_opts(obj):
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
@@ -96,14 +154,26 @@ def _list_opts(obj, name):
elif (isinstance(attr_obj, list) and
all(map(lambda x: isinstance(x, cfg.Opt), attr_obj))):
opts.extend(attr_obj)
- if not opts:
- return
- global OPTION_COUNT
- OPTION_COUNT += len(opts)
- print '######## defined in %s ########\n' % name
+
+ ret = {}
for opt in opts:
- _print_opt(opt)
+ ret.setdefault(_guess_groups(opt, obj), []).append(opt)
+ return ret.items()
+
+
+def print_group_opts(group, opts_by_module):
+ print "[%s]" % group
print
+ global OPTION_COUNT
+ for mod, opts in opts_by_module:
+ OPTION_COUNT += len(opts)
+ print '#'
+ print '# Options defined in %s' % mod
+ print '#'
+ print
+ for opt in opts:
+ _print_opt(opt)
+ print
def _get_my_ip():
@@ -134,10 +204,14 @@ def _sanitize_default(s):
return s
-def _wrap(msg, indent):
- padding = ' ' * indent
- prefix = "\n%s %s " % (OPTION_HELP_INDENT, padding)
- return prefix.join(textwrap.wrap(msg, WORDWRAP_WIDTH))
+OPT_TYPES = {
+ 'StrOpt': 'string value',
+ 'BoolOpt': 'boolean value',
+ 'IntOpt': 'integer value',
+ 'FloatOpt': 'floating point value',
+ 'ListOpt': 'list value',
+ 'MultiStrOpt': 'multi valued',
+}
def _print_opt(opt):
@@ -150,35 +224,35 @@ def _print_opt(opt):
except (ValueError, AttributeError), err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
+ opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+ print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
try:
if opt_default is None:
- print '# %s=<None>' % opt_name
+ print '#%s=<None>' % opt_name
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
- print '# %s=%s' % (opt_name, _sanitize_default(opt_default))
+ print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
- print '# %s=%s' % (opt_name, str(opt_default).lower())
+ print '#%s=%s' % (opt_name, str(opt_default).lower())
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
- print '# %s=%s' % (opt_name, opt_default)
+ print '#%s=%s' % (opt_name, opt_default)
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
- print '# %s=%s' % (opt_name, opt_default)
+ print '#%s=%s' % (opt_name, opt_default)
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
- print '# %s=%s' % (opt_name, ','.join(opt_default))
+ print '#%s=%s' % (opt_name, ','.join(opt_default))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
for default in opt_default:
- print '# %s=%s' % (opt_name, default)
+ print '#%s=%s' % (opt_name, default)
+ print
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
- opt_type_tag = "(%s)" % opt_type
- print OPTION_HELP_INDENT, opt_type_tag, _wrap(opt_help, len(opt_type_tag))
- print
if __name__ == '__main__':
diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py
index 873f20543..a4bcfeaa8 100755
--- a/tools/db/schema_diff.py
+++ b/tools/db/schema_diff.py
@@ -107,7 +107,7 @@ def _get_db_driver_class(db_type):
elif db_type == "postgres":
return Postgres
else:
- raise Exception("database %s not supported" % db_type)
+ raise Exception(_("database %s not supported") % db_type)
### Migrate
diff --git a/tools/flakes.py b/tools/flakes.py
new file mode 100644
index 000000000..4b93abc21
--- /dev/null
+++ b/tools/flakes.py
@@ -0,0 +1,22 @@
+"""
+ wrapper for pyflakes to ignore gettext based warning:
+ "undefined name '_'"
+
+ From https://bugs.launchpad.net/pyflakes/+bug/844592
+"""
+import __builtin__
+import os
+import sys
+
+from pyflakes.scripts.pyflakes import main
+
+if __name__ == "__main__":
+ names = os.environ.get('PYFLAKES_BUILTINS', '_')
+ names = [x.strip() for x in names.split(',')]
+ for x in names:
+ if not hasattr(__builtin__, x):
+ setattr(__builtin__, x, True)
+
+ del names, os, __builtin__
+
+ sys.exit(main())
diff --git a/tools/hacking.py b/tools/hacking.py
index a22e1c6ff..7322fd071 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -51,48 +51,38 @@ VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
-def filename_match(filename, patterns, default=True):
- """
- Check if patterns contains a pattern that matches filename.
- If patterns is unspecified, this always returns True.
- """
- if not patterns:
- return default
- return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)
-
-
-def excluded(filename):
+# See https://github.com/jcrocholl/pep8/pull/111
+def excluded(self, filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
- return any((filename_match(filename, pep8.options.exclude,
+ return any((pep8.filename_match(filename, self.options.exclude,
default=False),
- filename_match(basename, pep8.options.exclude,
+ pep8.filename_match(basename, self.options.exclude,
default=False)))
-def input_dir(dirname, runner=None):
- """
- Check all Python source files in this directory and all subdirectories.
- """
+def input_dir(self, dirname):
+ """Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
- if excluded(dirname):
- return
- if runner is None:
- runner = pep8.input_file
+ if self.excluded(dirname):
+ return 0
+ counters = self.options.report.counters
+ verbose = self.options.verbose
+ filepatterns = self.options.filename
+ runner = self.runner
for root, dirs, files in os.walk(dirname):
- if pep8.options.verbose:
+ if verbose:
print('directory ' + root)
- pep8.options.counters['directories'] += 1
- dirs.sort()
- for subdir in dirs[:]:
- if excluded(os.path.join(root, subdir)):
+ counters['directories'] += 1
+ for subdir in sorted(dirs):
+ if self.excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
- files.sort()
- for filename in files:
- if pep8.filename_match(filename) and not excluded(filename):
- pep8.options.counters['files'] += 1
+ for filename in sorted(files):
+ # contain a pattern that matches?
+ if ((pep8.filename_match(filename, filepatterns) and
+ not self.excluded(filename))):
runner(os.path.join(root, filename))
@@ -242,7 +232,7 @@ def nova_import_module_only(logical_line):
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
rval = importModuleCheck(mod)
- if rval != None:
+ if rval is not None:
yield rval
# TODO(jogo) handle "from x import *"
@@ -271,18 +261,30 @@ def nova_import_alphabetical(logical_line, line_number, lines):
% (split_previous[1], split_line[1]))
-def nova_docstring_start_space(physical_line):
+def nova_import_no_db_in_virt(logical_line, filename):
+ if ("nova/virt" in filename and
+ not filename.endswith("fake.py") and
+ "nova import db" in logical_line):
+ yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*")
+
+
+def nova_docstring_start_space(physical_line, previous_logical):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) > pos + 1):
- if (physical_line[pos + 3] == ' '):
- return (pos, "NOVA N401: one line docstring should not start with"
- " a space")
+ # it's important that we determine this is actually a docstring,
+ # and not a doc block used somewhere after the first line of a
+ # function def
+ if (previous_logical.startswith("def ") or
+ previous_logical.startswith("class ")):
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
+ if (pos != -1 and len(physical_line) > pos + 4):
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "NOVA N401: docstring should not start with"
+ " a space")
def nova_docstring_one_line(physical_line):
@@ -295,7 +297,7 @@ def nova_docstring_one_line(physical_line):
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
- if (physical_line[-5] != '.'):
+ if (physical_line[-5] not in ['.', '?', '!']):
return pos, "NOVA N402: one line docstring needs a period"
@@ -474,8 +476,8 @@ if __name__ == "__main__":
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
- pep8.excluded = excluded
- pep8.input_dir = input_dir
+ pep8.StyleGuide.excluded = excluded
+ pep8.StyleGuide.input_dir = input_dir
try:
pep8._main()
sys.exit(once_error)
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 19b8f3f1e..b1ceb74f0 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -196,9 +196,6 @@ def install_dependencies(venv=VENV):
pip_install('-r', PIP_REQUIRES)
pip_install('-r', TEST_REQUIRES)
- # Install nova into the virtual_env. No more path munging!
- run_command([os.path.join(venv, 'bin/python'), 'setup.py', 'develop'])
-
def post_process():
get_distro().post_process()
diff --git a/tools/lintstack.py b/tools/lintstack.py
index ce9b6f8a6..5c4fb0a3a 100755
--- a/tools/lintstack.py
+++ b/tools/lintstack.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""pylint error checking"""
+"""pylint error checking."""
import cStringIO as StringIO
import json
diff --git a/tools/lintstack.sh b/tools/lintstack.sh
index 848a16fa5..42c6a60b3 100755
--- a/tools/lintstack.sh
+++ b/tools/lintstack.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
+# Copyright (c) 2012-2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,13 +15,31 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Use lintstack.py to compare pylint errors between HEAD and HEAD~1
-
+# Use lintstack.py to compare pylint errors.
+# We run pylint twice, once on HEAD, once on the code before the latest
+# commit for review.
set -e
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
GITHEAD=`git rev-parse HEAD`
cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py
-git checkout HEAD~1
+
+if git rev-parse HEAD^2 2>/dev/null; then
+ # The HEAD is a Merge commit. Here, the patch to review is
+ # HEAD^2, the master branch is at HEAD^1, and the patch was
+ # written based on HEAD^2~1.
+ PREV_COMMIT=`git rev-parse HEAD^2~1`
+ git checkout HEAD~1
+ # The git merge is necessary for reviews with a series of patches.
+ # If not, this is a no-op so won't hurt either.
+ git merge $PREV_COMMIT
+else
+ # The HEAD is not a merge commit. This won't happen on gerrit.
+ # Most likely you are running against your own patch locally.
+ # We assume the patch to examine is HEAD, and we compare it against
+ # HEAD~1
+ git checkout HEAD~1
+fi
+
# First generate tools/pylint_exceptions from HEAD~1
$TOOLS_DIR/lintstack.head.py generate
# Then use that as a reference to compare against HEAD
diff --git a/tools/pip-requires b/tools/pip-requires
index a214d7bc2..1845ba7dd 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,13 +1,14 @@
SQLAlchemy>=0.7.8,<=0.7.9
Cheetah==2.4.4
-amqplib==0.6.1
+amqplib>=0.6.1
anyjson>=0.2.4
-boto==2.1.1
+argparse
+boto
eventlet>=0.9.17
-kombu==1.0.4
-lxml>=2.3,<=2.3.5
+kombu>=1.0.4
+lxml>=2.3
routes==1.12.3
-WebOb==1.0.8
+WebOb==1.2.3
greenlet>=0.3.1
PasteDeploy==1.5.0
paste
@@ -22,3 +23,5 @@ setuptools_git>=0.4
python-cinderclient
python-quantumclient>=2.1
python-glanceclient>=0.5.0,<2
+python-keystoneclient>=0.2.0
+stevedore>=0.7
diff --git a/tools/test-requires b/tools/test-requires
index fc56d3c87..6ee42d31c 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -2,13 +2,14 @@
distribute>=0.6.24
coverage
+discover
+feedparser
+fixtures>=0.3.12
mox==0.5.3
-nose
-testtools
-openstack.nose_plugin>=0.7
-nosehtmloutput
-pep8==1.2
+MySQL-python
+pep8==1.3.3
pylint==0.25.2
+python-subunit
sphinx>=1.1.2
-feedparser
-MySQL-python
+testrepository>=0.0.8
+testtools>=0.9.22
diff --git a/tools/xenserver/destroy_cached_images.py b/tools/xenserver/destroy_cached_images.py
index 01d68b4f6..fa71c0a68 100644
--- a/tools/xenserver/destroy_cached_images.py
+++ b/tools/xenserver/destroy_cached_images.py
@@ -26,7 +26,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -43,7 +42,7 @@ destroy_opts = [
help='Don\'t actually delete the VDIs.')
]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_cli_opts(destroy_opts)
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index 10f9c1ffe..eeaf978b8 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -27,16 +27,21 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
-from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
+from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver
-
-CONF = config.CONF
+cleaner_opts = [
+ cfg.IntOpt('zombie_instance_updated_at_window',
+ default=172800,
+ help='Number of seconds zombie instances are cleaned up.'),
+]
+CONF = cfg.CONF
+CONF.register_opts(cleaner_opts)
CONF.import_opt("resize_confirm_window", "nova.compute.manager")
@@ -45,7 +50,7 @@ ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
def call_xenapi(xenapi, method, *args):
- """Make a call to xapi"""
+ """Make a call to xapi."""
return xenapi._session.call_xenapi(method, *args)
@@ -285,7 +290,9 @@ def main():
raise Exception("`zombie_instance_updated_at_window` has to be longer"
" than `resize_confirm_window`.")
- xenapi = xenapi_driver.XenAPIDriver()
+ # NOTE(blamar) This tool does not require DB access, so passing in the
+ # 'abstract' VirtAPI class is acceptable
+ xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())
if command == "list-vdis":
if CONF.verbose:
diff --git a/tox.ini b/tox.ini
index a40b1900a..1c43be4ed 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,38 +3,44 @@ envlist = py26,py27,pep8
[testenv]
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=0.05
- NOSE_OPENSTACK_YELLOW=0.025
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
-
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
-commands = nosetests {posargs}
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel {posargs} ; RET=$? ; echo "Slowest Tests" ; testr slowest && exit $RET'
[tox:jenkins]
sitepackages = True
downloadcache = ~/cache/pip
[testenv:pep8]
-deps=pep8==1.2
+deps=pep8==1.3.3
commands =
- python tools/hacking.py --ignore=N4,E12,E711,E721 --repeat --show-source \
- --exclude=.venv,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
+ python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
+ python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ --filename=nova* bin
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/tools/pip-requires
- pylint==0.25.2
+ pylint==0.26.0
commands = bash tools/lintstack.sh
+[testenv:pyflakes]
+deps = pyflakes
+commands = python tools/flakes.py nova
+
[testenv:cover]
-setenv = NOSE_WITH_COVERAGE=1
+# Need to omit DynamicallyCompiledCheetahTemplate.py from coverage because
+# it ceases to exist post test run. Also do not run test_coverage_ext tests
+# while gathering coverage as those tests conflict with coverage.
+setenv = OMIT=--omit=DynamicallyCompiledCheetahTemplate.py
+ PYTHON=coverage run --source nova --parallel-mode
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel \^\(\?\!\.\*test_coverage_ext\)\.\*\$ ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
[testenv:venv]
commands = {posargs}