-rw-r--r--  .mailmap | 2
-rw-r--r--  Authors | 3
-rw-r--r--  MANIFEST.in | 3
-rwxr-xr-x  bin/nova-ajax-console-proxy | 5
-rwxr-xr-x  bin/nova-dhcpbridge | 3
-rwxr-xr-x  bin/nova-manage | 216
-rwxr-xr-x  bin/stack | 3
-rw-r--r--  bzrplugins/novalog/__init__.py | 60
-rw-r--r--  contrib/redhat-eventlet.patch | 16
-rwxr-xr-x  doc/find_autodoc_modules.sh | 2
-rw-r--r--  doc/source/api/autoindex.rst | 141
-rw-r--r--  doc/source/api/nova..adminclient.rst | 6
-rw-r--r--  doc/source/api/nova..api.direct.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.admin.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.apirequest.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.cloud.rst | 6
-rw-r--r--  doc/source/api/nova..api.ec2.metadatarequesthandler.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.auth.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.backup_schedules.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.common.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.consoles.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.faults.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.flavors.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.images.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.servers.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.shared_ip_groups.rst | 6
-rw-r--r--  doc/source/api/nova..api.openstack.zones.rst | 6
-rw-r--r--  doc/source/api/nova..auth.dbdriver.rst | 6
-rw-r--r--  doc/source/api/nova..auth.fakeldap.rst | 6
-rw-r--r--  doc/source/api/nova..auth.ldapdriver.rst | 6
-rw-r--r--  doc/source/api/nova..auth.manager.rst | 6
-rw-r--r--  doc/source/api/nova..auth.signer.rst | 6
-rw-r--r--  doc/source/api/nova..cloudpipe.pipelib.rst | 6
-rw-r--r--  doc/source/api/nova..compute.api.rst | 6
-rw-r--r--  doc/source/api/nova..compute.instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..compute.manager.rst | 6
-rw-r--r--  doc/source/api/nova..compute.power_state.rst | 6
-rw-r--r--  doc/source/api/nova..console.api.rst | 6
-rw-r--r--  doc/source/api/nova..console.fake.rst | 6
-rw-r--r--  doc/source/api/nova..console.manager.rst | 6
-rw-r--r--  doc/source/api/nova..console.xvp.rst | 6
-rw-r--r--  doc/source/api/nova..context.rst | 6
-rw-r--r--  doc/source/api/nova..crypto.rst | 6
-rw-r--r--  doc/source/api/nova..db.api.rst | 6
-rw-r--r--  doc/source/api/nova..db.base.rst | 6
-rw-r--r--  doc/source/api/nova..db.migration.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.api.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migration.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.models.rst | 6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.session.rst | 6
-rw-r--r--  doc/source/api/nova..exception.rst | 6
-rw-r--r--  doc/source/api/nova..fakememcache.rst | 6
-rw-r--r--  doc/source/api/nova..fakerabbit.rst | 6
-rw-r--r--  doc/source/api/nova..flags.rst | 6
-rw-r--r--  doc/source/api/nova..image.glance.rst | 6
-rw-r--r--  doc/source/api/nova..image.local.rst | 6
-rw-r--r--  doc/source/api/nova..image.s3.rst | 6
-rw-r--r--  doc/source/api/nova..image.service.rst | 6
-rw-r--r--  doc/source/api/nova..log.rst | 6
-rw-r--r--  doc/source/api/nova..manager.rst | 6
-rw-r--r--  doc/source/api/nova..network.api.rst | 6
-rw-r--r--  doc/source/api/nova..network.linux_net.rst | 6
-rw-r--r--  doc/source/api/nova..network.manager.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.bucket.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.handler.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.image.rst | 6
-rw-r--r--  doc/source/api/nova..objectstore.stored.rst | 6
-rw-r--r--  doc/source/api/nova..quota.rst | 6
-rw-r--r--  doc/source/api/nova..rpc.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.chance.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.driver.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.manager.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.simple.rst | 6
-rw-r--r--  doc/source/api/nova..scheduler.zone.rst | 6
-rw-r--r--  doc/source/api/nova..service.rst | 6
-rw-r--r--  doc/source/api/nova..test.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.fakes.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_adminapi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_api.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_auth.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_common.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_faults.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_flavors.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_images.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_servers.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_zones.rst | 6
-rw-r--r--  doc/source/api/nova..tests.api.test_wsgi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.db.fakes.rst | 6
-rw-r--r--  doc/source/api/nova..tests.declare_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.fake_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.glance.stubs.rst | 6
-rw-r--r--  doc/source/api/nova..tests.hyperv_unittest.rst | 6
-rw-r--r--  doc/source/api/nova..tests.objectstore_unittest.rst | 6
-rw-r--r--  doc/source/api/nova..tests.real_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.runtime_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_access.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_api.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_auth.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_cloud.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_compute.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_console.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_direct.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_flags.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_instance_types.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_localization.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_log.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_middleware.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_misc.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_network.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_quota.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_rpc.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_scheduler.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_service.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_test.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_utils.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_virt.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_volume.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_xenapi.rst | 6
-rw-r--r--  doc/source/api/nova..tests.xenapi.stubs.rst | 6
-rw-r--r--  doc/source/api/nova..utils.rst | 6
-rw-r--r--  doc/source/api/nova..version.rst | 6
-rw-r--r--  doc/source/api/nova..virt.connection.rst | 6
-rw-r--r--  doc/source/api/nova..virt.disk.rst | 6
-rw-r--r--  doc/source/api/nova..virt.fake.rst | 6
-rw-r--r--  doc/source/api/nova..virt.hyperv.rst | 6
-rw-r--r--  doc/source/api/nova..virt.images.rst | 6
-rw-r--r--  doc/source/api/nova..virt.libvirt_conn.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.fake.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.network_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vm_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vmops.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volume_utils.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volumeops.rst | 6
-rw-r--r--  doc/source/api/nova..virt.xenapi_conn.rst | 6
-rw-r--r--  doc/source/api/nova..volume.api.rst | 6
-rw-r--r--  doc/source/api/nova..volume.driver.rst | 6
-rw-r--r--  doc/source/api/nova..volume.manager.rst | 6
-rw-r--r--  doc/source/api/nova..volume.san.rst | 6
-rw-r--r--  doc/source/api/nova..wsgi.rst | 6
-rw-r--r--  doc/source/devref/index.rst | 2
-rw-r--r--  doc/source/devref/threading.rst | 17
-rw-r--r--  doc/source/devref/xensmvolume.rst | 88
-rw-r--r--  doc/source/vmwareapi_readme.rst | 8
-rw-r--r--  etc/nova/api-paste.ini | 15
-rw-r--r--  nova/api/direct.py | 8
-rw-r--r--  nova/api/ec2/__init__.py | 20
-rw-r--r--  nova/api/ec2/apirequest.py | 14
-rw-r--r--  nova/api/ec2/cloud.py | 37
-rw-r--r--  nova/api/ec2/ec2utils.py | 4
-rw-r--r--  nova/api/ec2/faults.py | 64
-rw-r--r--  nova/api/manager.py | 2
-rw-r--r--  nova/api/openstack/common.py | 3
-rw-r--r--  nova/api/openstack/contrib/admin_actions.py | 14
-rw-r--r--  nova/api/openstack/contrib/createserverext.py | 10
-rw-r--r--  nova/api/openstack/contrib/deferred_delete.py | 7
-rw-r--r--  nova/api/openstack/contrib/diskconfig.py | 3
-rw-r--r--  nova/api/openstack/contrib/extended_status.py | 110
-rw-r--r--  nova/api/openstack/contrib/flavorextraspecs.py | 6
-rw-r--r--  nova/api/openstack/contrib/multinic.py | 25
-rw-r--r--  nova/api/openstack/contrib/quotas.py | 4
-rw-r--r--  nova/api/openstack/contrib/rescue.py | 20
-rw-r--r--  nova/api/openstack/contrib/security_groups.py | 2
-rw-r--r--  nova/api/openstack/contrib/volumetypes.py | 7
-rw-r--r--  nova/api/openstack/extensions.py | 2
-rw-r--r--  nova/api/openstack/faults.py | 5
-rw-r--r--  nova/api/openstack/flavors.py | 3
-rw-r--r--  nova/api/openstack/limits.py | 2
-rw-r--r--  nova/api/openstack/server_metadata.py | 12
-rw-r--r--  nova/api/openstack/servers.py | 122
-rw-r--r--  nova/api/openstack/views/flavors.py | 1
-rw-r--r--  nova/api/openstack/views/limits.py | 1
-rw-r--r--  nova/api/openstack/views/servers.py | 10
-rw-r--r--  nova/api/openstack/wsgi.py | 5
-rw-r--r--  nova/auth/fakeldap.py | 4
-rw-r--r--  nova/auth/manager.py | 4
-rw-r--r--  nova/compute/api.py | 264
-rw-r--r--  nova/compute/manager.py | 115
-rw-r--r--  nova/compute/task_states.py | 1
-rw-r--r--  nova/context.py | 3
-rw-r--r--  nova/crypto.py | 2
-rw-r--r--  nova/db/api.py | 98
-rw-r--r--  nova/db/sqlalchemy/api.py | 169
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py | 98
-rw-r--r--  nova/db/sqlalchemy/models.py | 28
-rw-r--r--  nova/exception.py | 21
-rw-r--r--  nova/fakerabbit.py | 2
-rw-r--r--  nova/flags.py | 2
-rw-r--r--  nova/image/fake.py | 8
-rw-r--r--  nova/image/glance.py | 2
-rw-r--r-- [-rwxr-xr-x]  nova/local.py (renamed from builddeb.sh) | 25
-rw-r--r--  nova/log.py | 14
-rwxr-xr-x  nova/network/linux_net.py | 94
-rw-r--r--  nova/network/manager.py | 29
-rw-r--r--  nova/network/quantum/client.py | 2
-rw-r--r--  nova/network/quantum/manager.py | 318
-rw-r--r--  nova/network/quantum/melange_connection.py | 29
-rw-r--r--  nova/network/quantum/melange_ipam_lib.py | 100
-rw-r--r--  nova/network/quantum/nova_ipam_lib.py | 37
-rw-r--r--  nova/network/quantum/quantum_connection.py | 31
-rw-r--r--  nova/notifier/list_notifier.py | 2
-rw-r--r--  nova/quota.py | 5
-rw-r--r--  nova/rpc/common.py | 4
-rw-r--r--  nova/rpc/impl_carrot.py | 2
-rw-r--r--  nova/rpc/impl_kombu.py | 4
-rw-r--r--  nova/scheduler/api.py | 8
-rw-r--r--  nova/scheduler/chance.py | 6
-rw-r--r--  nova/scheduler/distributed_scheduler.py | 74
-rw-r--r--  nova/scheduler/driver.py | 19
-rw-r--r--  nova/scheduler/filters/abstract_filter.py | 2
-rw-r--r--  nova/scheduler/filters/all_hosts_filter.py | 2
-rw-r--r--  nova/scheduler/filters/instance_type_filter.py | 2
-rw-r--r--  nova/scheduler/filters/json_filter.py | 2
-rw-r--r--  nova/scheduler/least_cost.py | 86
-rw-r--r--  nova/scheduler/manager.py | 2
-rw-r--r--  nova/scheduler/scheduler_options.py | 98
-rw-r--r--  nova/scheduler/simple.py | 74
-rw-r--r--  nova/scheduler/vsa.py | 5
-rw-r--r--  nova/scheduler/zone.py | 77
-rw-r--r--  nova/scheduler/zone_manager.py | 28
-rw-r--r--  nova/test.py | 4
-rw-r--r--  nova/tests/__init__.py | 1
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 59
-rw-r--r--  nova/tests/api/ec2/test_faults.py | 34
-rw-r--r--  nova/tests/api/openstack/contrib/test_admin_actions.py | 23
-rw-r--r--  nova/tests/api/openstack/contrib/test_createserverext.py | 5
-rw-r--r--  nova/tests/api/openstack/contrib/test_extendedstatus.py | 109
-rw-r--r--  nova/tests/api/openstack/contrib/test_flavors_extra_specs.py (renamed from nova/tests/api/openstack/test_flavors_extra_specs.py) | 116
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py | 103
-rw-r--r--  nova/tests/api/openstack/contrib/test_multinic_xs.py | 26
-rw-r--r--  nova/tests/api/openstack/contrib/test_quotas.py | 77
-rw-r--r--  nova/tests/api/openstack/contrib/test_rescue.py | 10
-rw-r--r--  nova/tests/api/openstack/contrib/test_security_groups.py | 1222
-rw-r--r--  nova/tests/api/openstack/contrib/test_volume_types.py (renamed from nova/tests/api/openstack/test_volume_types.py) | 74
-rw-r--r--  nova/tests/api/openstack/contrib/test_volume_types_extra_specs.py (renamed from nova/tests/api/openstack/test_volume_types_extra_specs.py) | 110
-rw-r--r--  nova/tests/api/openstack/contrib/test_volumes.py | 3
-rw-r--r--  nova/tests/api/openstack/test_consoles.py | 171
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 1
-rw-r--r--  nova/tests/api/openstack/test_server_actions.py | 25
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 116
-rw-r--r--  nova/tests/api/openstack/test_users.py | 157
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 227
-rw-r--r--  nova/tests/glance/stubs.py | 9
-rw-r--r--  nova/tests/integrated/test_servers.py | 4
-rw-r--r--  nova/tests/scheduler/test_distributed_scheduler.py | 21
-rw-r--r--  nova/tests/scheduler/test_host_filter.py | 51
-rw-r--r--  nova/tests/scheduler/test_least_cost.py | 51
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 183
-rw-r--r--  nova/tests/scheduler/test_scheduler_options.py | 138
-rw-r--r--  nova/tests/scheduler/test_vsa_scheduler.py | 13
-rw-r--r--  nova/tests/test_compute.py | 956
-rw-r--r--  nova/tests/test_compute_utils.py | 4
-rw-r--r--  nova/tests/test_image.py | 11
-rw-r--r--  nova/tests/test_libvirt.py | 201
-rw-r--r--  nova/tests/test_network.py | 61
-rw-r--r--  nova/tests/test_nova_manage.py | 24
-rw-r--r--  nova/tests/test_quantum.py | 165
-rw-r--r--  nova/tests/test_quota.py | 19
-rw-r--r--  nova/tests/test_vmwareapi.py | 9
-rw-r--r--  nova/tests/test_xenapi.py | 74
-rw-r--r--  nova/tests/xenapi/stubs.py | 8
-rw-r--r--  nova/utils.py | 39
-rw-r--r--  nova/virt/driver.py | 15
-rw-r--r--  nova/virt/fake.py | 2
-rw-r--r--  nova/virt/hyperv.py | 2
-rw-r--r--  nova/virt/libvirt/connection.py | 231
-rw-r--r--  nova/virt/libvirt/vif.py | 11
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 2
-rw-r--r--  nova/virt/vmwareapi_conn.py | 4
-rw-r--r--  nova/virt/xenapi/fake.py | 64
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 81
-rw-r--r--  nova/virt/xenapi/vmops.py | 63
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 236
-rw-r--r--  nova/virt/xenapi/volumeops.py | 208
-rw-r--r--  nova/virt/xenapi_conn.py | 25
-rw-r--r--  nova/volume/api.py | 2
-rw-r--r--  nova/volume/driver.py | 4
-rw-r--r--  nova/volume/manager.py | 38
-rw-r--r--  nova/volume/xensm.py | 244
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 113
-rwxr-xr-x  run_tests.sh | 12
-rwxr-xr-x  tools/clean-vlans | 2
-rw-r--r--  tools/install_venv.py | 58
292 files changed, 5866 insertions(+), 4202 deletions(-)
diff --git a/.mailmap b/.mailmap
index f081ccf1b..735615afe 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1,6 +1,7 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
+<aaron.lee@rackspace.com> <wwkeyboard@gmail.com>
<anotherjesse@gmail.com> <jesse@dancelamb>
<anotherjesse@gmail.com> <jesse@gigantor.local>
<anotherjesse@gmail.com> <jesse@ubuntu>
@@ -59,3 +60,4 @@
<kshileev@gmail.com> <kshileev@griddynamics.com>
<nsokolov@griddynamics.com> <nsokolov@griddynamics.net>
<troy.toman@rackspace.com> <ttcl@mac.com>
+<zulcss@ubuntu.com> <chuck.short@canonical.com>
diff --git a/Authors b/Authors
index 8bb807041..47288c9ed 100644
--- a/Authors
+++ b/Authors
@@ -4,6 +4,7 @@ Adam Johnson <adjohn@gmail.com>
Ahmad Hassan <ahmad.hassan@hp.com>
Alex Meade <alex.meade@rackspace.com>
Alexander Sakhnov <asakhnov@mirantis.com>
+Alvaro Lopez Garcia <aloga@ifca.unican.es>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
@@ -34,6 +35,7 @@ Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
David Subiros <david.perez5@hp.com>
Dean Troyer <dtroyer@gmail.com>
+Derek Higgins <higginsd@gmail.com>
Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
Donal Lafferty <donal.lafferty@citrix.com>
@@ -44,6 +46,7 @@ Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
+Gary Kotton <garyk@radware.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
diff --git a/MANIFEST.in b/MANIFEST.in
index f399663c9..be3e95c4d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,12 +1,11 @@
include HACKING LICENSE run_tests.py run_tests.sh
-include README builddeb.sh exercise_rsapi.py
+include README exercise_rsapi.py
include ChangeLog MANIFEST.in pylintrc Authors
graft nova/CA
graft doc
graft smoketests
graft tools
graft etc
-graft bzrplugins
graft contrib
graft po
graft plugins
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 660eff9d0..64e2b17e9 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -100,7 +100,8 @@ class AjaxConsoleProxy(object):
                          % (req_url, str(env)))
                start_response("401 NOT AUTHORIZED", [])
                return "Not Authorized"
-        except Exception:
+        except Exception, exc:
+            LOG.exception(exc)
            start_response("500 ERROR", [])
            return "Server Error"
@@ -139,4 +140,4 @@ if __name__ == '__main__':
     server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
     service.serve(server)
     service.wait()
-    self.conn.close()
+    acp.conn.close()
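(The old cleanup line ran at module scope, where no "self" exists, a latent NameError; "acp" is the AjaxConsoleProxy instance constructed a few lines up, so acp.conn.close() closes the connection that was actually opened.)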
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 1c9ae951e..25d3e181d 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -90,7 +90,8 @@ def init_leases(network_id):
     """Get the list of hosts for a network."""
     ctxt = context.get_admin_context()
     network_ref = db.network_get(ctxt, network_id)
-    return linux_net.get_dhcp_leases(ctxt, network_ref)
+    network_manager = utils.import_object(FLAGS.network_manager)
+    return network_manager.get_dhcp_leases(ctxt, network_ref)


 def main():
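(Lease listing is now delegated to whatever class FLAGS.network_manager names instead of being hard-wired to linux_net, so alternative network managers can supply their own get_dhcp_leases implementation.)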
diff --git a/bin/nova-manage b/bin/nova-manage
index 244c11869..382260ca8 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -727,6 +727,7 @@ class NetworkCommands(object):
     @args('--vpn', dest="vpn_start", help='vpn start')
     @args('--fixed_range_v6', dest="fixed_range_v6",
           help='IPv6 subnet (ex: fe80::/64')
+    @args('--gateway', dest="gateway", help='gateway')
     @args('--gateway_v6', dest="gateway_v6", help='ipv6 gateway')
     @args('--bridge', dest="bridge",
           metavar='<bridge>',
@@ -746,9 +747,10 @@ class NetworkCommands(object):
           help='Network interface priority')
     def create(self, label=None, fixed_range_v4=None, num_networks=None,
                network_size=None, multi_host=None, vlan_start=None,
-               vpn_start=None, fixed_range_v6=None, gateway_v6=None,
-               bridge=None, bridge_interface=None, dns1=None, dns2=None,
-               project_id=None, priority=None, uuid=None):
+               vpn_start=None, fixed_range_v6=None, gateway=None,
+               gateway_v6=None, bridge=None, bridge_interface=None,
+               dns1=None, dns2=None, project_id=None, priority=None,
+               uuid=None):
         """Creates fixed ips for host by range"""

         # check for certain required inputs
@@ -811,6 +813,7 @@ class NetworkCommands(object):
                               vlan_start=int(vlan_start),
                               vpn_start=int(vpn_start),
                               cidr_v6=fixed_range_v6,
+                              gateway=gateway,
                               gateway_v6=gateway_v6,
                               bridge=bridge,
                               bridge_interface=bridge_interface,
@@ -858,14 +861,26 @@
                   network.cidr,
                   network.cidr_v6)

-    @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
+    @args('--fixed_range', dest="fixed_range", metavar='<x.x.x.x/yy>',
           help='Network to delete')
-    def delete(self, fixed_range):
+    @args('--uuid', dest='uuid', metavar='<uuid>',
+          help='UUID of network to delete')
+    def delete(self, fixed_range=None, uuid=None):
         """Deletes a network"""
-        # delete the network
+        if fixed_range is None and uuid is None:
+            raise Exception("Please specify either fixed_range or uuid")
+
         net_manager = utils.import_object(FLAGS.network_manager)
-        net_manager.delete_network(context.get_admin_context(), fixed_range)
+        if "QuantumManager" in FLAGS.network_manager:
+            if uuid is None:
+                raise Exception("UUID is required to delete Quantum Networks")
+            if fixed_range:
+                raise Exception("Deleting by fixed_range is not supported " \
+                                "with the QuantumManager")
+        # delete the network
+        net_manager.delete_network(context.get_admin_context(),
+                                   fixed_range, uuid)

     @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
           help='Network to modify')
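With the new flag, deletion now covers both classic and Quantum-managed networks. A hypothetical invocation (the flag names come from the @args decorators above; the CIDR and UUID values are made up):

    nova-manage network delete --fixed_range=10.0.0.0/24    # FlatDHCP/VLAN managers
    nova-manage network delete --uuid=685ff2ad-97fa-4fe2-9e36-6f2b0c64d201    # QuantumManager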
@@ -1809,6 +1824,7 @@
             return new
         except Exception as exc:
             print _("Failed to register %(path)s: %(exc)s") % locals()
+            sys.exit(1)

     @args('--image', dest='image', metavar='<image>', help='Image')
     @args('--kernel', dest='kernel', metavar='<kernel>', help='Kernel')
@@ -1947,6 +1963,147 @@
         self._convert_images(machine_images)


+class StorageManagerCommands(object):
+    """Class for managing Storage Backends and Flavors"""
+
+    def flavor_list(self, flavor=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if flavor == None:
+                flavors = db.sm_flavor_get_all(ctxt)
+            else:
+                flavors = db.sm_flavor_get(ctxt, flavor)
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-18s\t%-20s\t%s" % (_('id'),
+                                    _('Label'),
+                                    _('Description'))
+
+        for flav in flavors:
+            print "%-18s\t%-20s\t%s" % (
+                flav['id'],
+                flav['label'],
+                flav['description'])
+
+    def flavor_create(self, label, desc):
+        # TODO flavor name must be unique
+        try:
+            db.sm_flavor_create(context.get_admin_context(),
+                                dict(label=label,
+                                     description=desc))
+        except exception.DBError, e:
+            _db_error(e)
+
+    def flavor_delete(self, label):
+        try:
+            db.sm_flavor_delete(context.get_admin_context(), label)
+
+        except exception.DBError, e:
+            _db_error(e)
+
+    def _splitfun(self, item):
+        i = item.split("=")
+        return i[0:2]
+
+    def backend_list(self, backend_conf_id=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if backend_conf_id == None:
+                backends = db.sm_backend_conf_get_all(ctxt)
+            else:
+                backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
+                                                 _('Flavor id'),
+                                                 _('SR UUID'),
+                                                 _('SR Type'),
+                                                 _('Config Parameters'),)
+
+        for b in backends:
+            print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
+                                                     b['flavor_id'],
+                                                     b['sr_uuid'],
+                                                     b['sr_type'],
+                                                     b['config_params'],)
+
+    def backend_add(self, flavor_label, sr_type, *args):
+        # TODO Add backend_introduce.
+        ctxt = context.get_admin_context()
+        params = dict(map(self._splitfun, args))
+
+        if 'sr_uuid' in params:
+            try:
+                backend = db.sm_backend_conf_get_by_sr(ctxt,
+                                                       params['sr_uuid'])
+            except exception.DBError, e:
+                _db_error(e)
+
+            if backend:
+                if len(backend) > 1:
+                    print 'error: Multiple backends found with given sr_uuid'
+                    sys.exit(2)
+
+                print 'Backend config found. Would you like to recreate this?'
+                print '(WARNING:Recreating will destroy all VDIs on backend!!)'
+                c = raw_input('Proceed? (y/n) ')
+                if c == 'y' or c == 'Y':
+                    try:
+                        db.sm_backend_conf_update(ctxt, backend['id'],
+                                                  dict(created=False))
+                    except exception.DBError, e:
+                        _db_error(e)
+                return
+
+            else:
+                print 'Backend config not found. Would you like to create it?'
+                print '(WARNING: Creating will destroy all data on backend!!!)'
+                c = raw_input('Proceed? (y/n) ')
+                if c != 'y' and c != 'Y':
+                    return
+
+        print '(WARNING: Creating will destroy all data on backend!!!)'
+        c = raw_input('Proceed? (y/n) ')
+        if c == 'y' or c == 'Y':
+            if flavor_label == None:
+                print "error: backend needs to be associated with flavor"
+                sys.exit(2)
+
+            try:
+                flavors = db.sm_flavor_get(ctxt, flavor_label)
+
+            except exception.NotFound as ex:
+                print "error: %s" % ex
+                sys.exit(2)
+
+            config_params = "".join(['%s=%s ' %
+                                     (key, params[key]) for key in params])
+
+            try:
+                db.sm_backend_conf_create(ctxt,
+                                          dict(flavor_id=flavors[0]['id'],
+                                               sr_uuid=None,
+                                               sr_type=sr_type,
+                                               config_params=config_params))
+            except exception.DBError, e:
+                _db_error(e)
+
+    def backend_remove(self, backend_conf_id):
+        try:
+            db.sm_backend_conf_delete(context.get_admin_context(),
+                                      backend_conf_id)
+
+        except exception.DBError, e:
+            _db_error(e)
+
+
 class AgentBuildCommands(object):
     """Class for managing agent builds."""
@@ -2017,6 +2174,47 @@
         print FLAGS.FlagsIntoString()


+class GetLogCommands(object):
+    """Get logging information"""
+
+    def errors(self):
+        """Get all of the errors from the log files"""
+        if FLAGS.logdir:
+            error_found = 0
+            logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
+            for file in logs:
+                log_file = os.path.join(FLAGS.logdir, file)
+                lines = [line.strip() for line in open(log_file, "r")]
+                lines.reverse()
+                print_name = 0
+                for index, line in enumerate(lines):
+                    if line.find(" ERROR ") > 0:
+                        error_found += 1
+                        if print_name == 0:
+                            print log_file + ":-"
+                            print_name = 1
+                        print "Line %d : %s" % (len(lines) - index, line)
+            if error_found == 0:
+                print "No errors in logfiles!"
+
+    def syslog(self, num_entries=10):
+        """Get <num_entries> of the nova syslog events"""
+        entries = int(num_entries)
+        count = 0
+        lines = [line.strip() for line in open('/var/log/syslog', "r")]
+        lines.reverse()
+        print "Last %s nova syslog entries:-" % (entries)
+        for line in lines:
+            if line.find("nova") > 0:
+                count += 1
+                print "%s" % (line)
+                if count == entries:
+                    break
+
+        if count == 0:
+            print "No nova entries in syslog!"
+
+
 CATEGORIES = [
     ('account', AccountCommands),
     ('agent', AgentBuildCommands),
@@ -2034,12 +2232,14 @@ CATEGORIES = [
     ('role', RoleCommands),
     ('service', ServiceCommands),
     ('shell', ShellCommands),
+    ('sm', StorageManagerCommands),
     ('user', UserCommands),
     ('version', VersionCommands),
     ('vm', VmCommands),
     ('volume', VolumeCommands),
     ('vpn', VpnCommands),
-    ('vsa', VsaCommands)]
+    ('vsa', VsaCommands),
+    ('logs', GetLogCommands)]


 def lazy_match(name, key_value_tuples):
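And a quick sketch of the new log commands (the paths come straight from the code above: *.log files under FLAGS.logdir and /var/log/syslog; the syslog entry count is optional and defaults to 10):

    nova-manage logs errors
    nova-manage logs syslog 20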
diff --git a/bin/stack b/bin/stack
index a1c6d1348..c8a99f073 100755
--- a/bin/stack
+++ b/bin/stack
@@ -118,6 +118,9 @@ def do_request(controller, method, params=None):
     req = urllib2.Request(url, data, headers)
     try:
         resp = urllib2.urlopen(req)
+    except urllib2.URLError, e:
+        print 'Failed to connect to %s: %s' % (url, e.reason)
+        sys.exit(1)
     except urllib2.HTTPError, e:
         print e.read()
         sys.exit(1)
diff --git a/bzrplugins/novalog/__init__.py b/bzrplugins/novalog/__init__.py
deleted file mode 100644
index 9817dc886..000000000
--- a/bzrplugins/novalog/__init__.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2010 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Log format for Nova's changelog."""
-
-import bzrlib.log
-from bzrlib.osutils import format_date
-
-
-class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
-    """This is mostly stolen from bzrlib.log.GnuChangelogLogFormatter
-    The difference is that it logs the author rather than the committer
-    which for Nova always is Tarmac."""
-
-    preferred_levels = 1
-
-    def log_revision(self, revision):
-        """Log a revision, either merged or not."""
-        to_file = self.to_file
-
-        date_str = format_date(revision.rev.timestamp,
-                               revision.rev.timezone or 0,
-                               self.show_timezone,
-                               date_fmt='%Y-%m-%d',
-                               show_offset=False)
-
-        authors = revision.rev.get_apparent_authors()
-        to_file.write('%s %s\n\n' % (date_str, ", ".join(authors)))
-
-        if revision.delta is not None and revision.delta.has_changed():
-            for c in revision.delta.added + revision.delta.removed + \
-                    revision.delta.modified:
-                path, = c[:1]
-                to_file.write('\t* %s:\n' % (path,))
-            for c in revision.delta.renamed:
-                oldpath, newpath = c[:2]
-                # For renamed files, show both the old and the new path
-                to_file.write('\t* %s:\n\t* %s:\n' % (oldpath, newpath))
-            to_file.write('\n')
-
-        if not revision.rev.message:
-            to_file.write('\tNo commit message\n')
-        else:
-            message = revision.rev.message.rstrip('\r\n')
-            for l in message.split('\n'):
-                to_file.write('\t%s\n' % (l.lstrip(),))
-        to_file.write('\n')
-
-bzrlib.log.register_formatter('novalog', NovaLogFormat)
diff --git a/contrib/redhat-eventlet.patch b/contrib/redhat-eventlet.patch
new file mode 100644
index 000000000..cf2ff53d5
--- /dev/null
+++ b/contrib/redhat-eventlet.patch
@@ -0,0 +1,16 @@
+--- .nova-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py.orig
+2011-05-25
+23:31:34.597271402 +0000
++++ .nova-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py
+2011-05-25
+23:33:24.055602468 +0000
+@@ -32,7 +32,7 @@
+ setattr(self, attr, wrapped_pipe)
+ __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
+
+- def wait(self, check_interval=0.01):
++ def wait(self, check_interval=0.01, timeout=None):
+ # Instead of a blocking OS call, this version of wait() uses logic
+ # borrowed from the eventlet 0.2 processes.Process.wait() method.
+ try:
+
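(This bundled patch appears to target virtualenv installs on Red Hat systems, hence the .nova-venv paths: the distribution's patched subprocess module calls wait() with a timeout argument that eventlet's green wrapper otherwise rejects.)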
diff --git a/doc/find_autodoc_modules.sh b/doc/find_autodoc_modules.sh
index c612b0084..b44f060fb 100755
--- a/doc/find_autodoc_modules.sh
+++ b/doc/find_autodoc_modules.sh
@@ -8,7 +8,7 @@ for x in `find ${NOVA_DIR} -name '*.py'`; do
     if [ `basename ${x} .py` == "__init__" ] ; then
         continue
     fi
-    relative=nova.`echo ${x} | sed -e 's$^'${NOVA_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'`
+    relative=nova`echo ${x} | sed -e 's$^'${NOVA_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'`
     modules="${modules} ${relative}"
 done
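(The trailing s$/$.$g substitution already turns the leading slash of the stripped path into a dot, so the old "nova." prefix produced doubled-dot module names such as nova..compute.api. Dropping the dot yields nova.compute.api, which is why the misnamed nova..*.rst files are deleted throughout this commit.)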
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
deleted file mode 100644
index d99d16eaa..000000000
--- a/doc/source/api/autoindex.rst
+++ /dev/null
@@ -1,141 +0,0 @@
-.. toctree::
- :maxdepth: 1
-
- nova..adminclient.rst
- nova..api.direct.rst
- nova..api.ec2.admin.rst
- nova..api.ec2.apirequest.rst
- nova..api.ec2.cloud.rst
- nova..api.ec2.metadatarequesthandler.rst
- nova..api.openstack.auth.rst
- nova..api.openstack.backup_schedules.rst
- nova..api.openstack.common.rst
- nova..api.openstack.consoles.rst
- nova..api.openstack.faults.rst
- nova..api.openstack.flavors.rst
- nova..api.openstack.images.rst
- nova..api.openstack.servers.rst
- nova..api.openstack.shared_ip_groups.rst
- nova..api.openstack.zones.rst
- nova..auth.dbdriver.rst
- nova..auth.fakeldap.rst
- nova..auth.ldapdriver.rst
- nova..auth.manager.rst
- nova..auth.signer.rst
- nova..cloudpipe.pipelib.rst
- nova..compute.api.rst
- nova..compute.instance_types.rst
- nova..compute.manager.rst
- nova..compute.power_state.rst
- nova..console.api.rst
- nova..console.fake.rst
- nova..console.manager.rst
- nova..console.xvp.rst
- nova..context.rst
- nova..crypto.rst
- nova..db.api.rst
- nova..db.base.rst
- nova..db.migration.rst
- nova..db.sqlalchemy.api.rst
- nova..db.sqlalchemy.migrate_repo.manage.rst
- nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
- nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
- nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
- nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
- nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
- nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
- nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
- nova..db.sqlalchemy.migration.rst
- nova..db.sqlalchemy.models.rst
- nova..db.sqlalchemy.session.rst
- nova..exception.rst
- nova..fakememcache.rst
- nova..fakerabbit.rst
- nova..flags.rst
- nova..image.glance.rst
- nova..image.local.rst
- nova..image.s3.rst
- nova..image.service.rst
- nova..log.rst
- nova..manager.rst
- nova..network.api.rst
- nova..network.linux_net.rst
- nova..network.manager.rst
- nova..objectstore.bucket.rst
- nova..objectstore.handler.rst
- nova..objectstore.image.rst
- nova..objectstore.stored.rst
- nova..quota.rst
- nova..rpc.rst
- nova..scheduler.chance.rst
- nova..scheduler.driver.rst
- nova..scheduler.manager.rst
- nova..scheduler.simple.rst
- nova..scheduler.zone.rst
- nova..service.rst
- nova..test.rst
- nova..tests.api.openstack.fakes.rst
- nova..tests.api.openstack.test_adminapi.rst
- nova..tests.api.openstack.test_api.rst
- nova..tests.api.openstack.test_auth.rst
- nova..tests.api.openstack.test_common.rst
- nova..tests.api.openstack.test_faults.rst
- nova..tests.api.openstack.test_flavors.rst
- nova..tests.api.openstack.test_images.rst
- nova..tests.api.openstack.test_ratelimiting.rst
- nova..tests.api.openstack.test_servers.rst
- nova..tests.api.openstack.test_shared_ip_groups.rst
- nova..tests.api.openstack.test_zones.rst
- nova..tests.api.test_wsgi.rst
- nova..tests.db.fakes.rst
- nova..tests.declare_flags.rst
- nova..tests.fake_flags.rst
- nova..tests.glance.stubs.rst
- nova..tests.hyperv_unittest.rst
- nova..tests.objectstore_unittest.rst
- nova..tests.real_flags.rst
- nova..tests.runtime_flags.rst
- nova..tests.test_access.rst
- nova..tests.test_api.rst
- nova..tests.test_auth.rst
- nova..tests.test_cloud.rst
- nova..tests.test_compute.rst
- nova..tests.test_console.rst
- nova..tests.test_direct.rst
- nova..tests.test_flags.rst
- nova..tests.test_instance_types.rst
- nova..tests.test_localization.rst
- nova..tests.test_log.rst
- nova..tests.test_middleware.rst
- nova..tests.test_misc.rst
- nova..tests.test_network.rst
- nova..tests.test_quota.rst
- nova..tests.test_rpc.rst
- nova..tests.test_scheduler.rst
- nova..tests.test_service.rst
- nova..tests.test_test.rst
- nova..tests.test_utils.rst
- nova..tests.test_virt.rst
- nova..tests.test_volume.rst
- nova..tests.test_xenapi.rst
- nova..tests.xenapi.stubs.rst
- nova..utils.rst
- nova..version.rst
- nova..virt.connection.rst
- nova..virt.disk.rst
- nova..virt.fake.rst
- nova..virt.hyperv.rst
- nova..virt.images.rst
- nova..virt.libvirt_conn.rst
- nova..virt.xenapi.fake.rst
- nova..virt.xenapi.network_utils.rst
- nova..virt.xenapi.vm_utils.rst
- nova..virt.xenapi.vmops.rst
- nova..virt.xenapi.volume_utils.rst
- nova..virt.xenapi.volumeops.rst
- nova..virt.xenapi_conn.rst
- nova..volume.api.rst
- nova..volume.driver.rst
- nova..volume.manager.rst
- nova..volume.san.rst
- nova..wsgi.rst
diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst
deleted file mode 100644
index 35fa839e1..000000000
--- a/doc/source/api/nova..adminclient.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..adminclient` Module
-==============================================================================
-.. automodule:: nova..adminclient
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.direct.rst b/doc/source/api/nova..api.direct.rst
deleted file mode 100644
index a1705c707..000000000
--- a/doc/source/api/nova..api.direct.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.direct` Module
-==============================================================================
-.. automodule:: nova..api.direct
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst
deleted file mode 100644
index 4e9ab308b..000000000
--- a/doc/source/api/nova..api.ec2.admin.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.admin` Module
-==============================================================================
-.. automodule:: nova..api.ec2.admin
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst
deleted file mode 100644
index c17a2ff3a..000000000
--- a/doc/source/api/nova..api.ec2.apirequest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.apirequest` Module
-==============================================================================
-.. automodule:: nova..api.ec2.apirequest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst
deleted file mode 100644
index f6145c217..000000000
--- a/doc/source/api/nova..api.ec2.cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.cloud` Module
-==============================================================================
-.. automodule:: nova..api.ec2.cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
deleted file mode 100644
index 75f5169e5..000000000
--- a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.metadatarequesthandler` Module
-==============================================================================
-.. automodule:: nova..api.ec2.metadatarequesthandler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst
deleted file mode 100644
index 8c3f8f2da..000000000
--- a/doc/source/api/nova..api.openstack.auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.auth` Module
-==============================================================================
-.. automodule:: nova..api.openstack.auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst
deleted file mode 100644
index 6b406f12d..000000000
--- a/doc/source/api/nova..api.openstack.backup_schedules.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.backup_schedules` Module
-==============================================================================
-.. automodule:: nova..api.openstack.backup_schedules
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.common.rst b/doc/source/api/nova..api.openstack.common.rst
deleted file mode 100644
index 4fd734790..000000000
--- a/doc/source/api/nova..api.openstack.common.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.common` Module
-==============================================================================
-.. automodule:: nova..api.openstack.common
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.consoles.rst b/doc/source/api/nova..api.openstack.consoles.rst
deleted file mode 100644
index 1e3e09599..000000000
--- a/doc/source/api/nova..api.openstack.consoles.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.consoles` Module
-==============================================================================
-.. automodule:: nova..api.openstack.consoles
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst
deleted file mode 100644
index 7b25561f7..000000000
--- a/doc/source/api/nova..api.openstack.faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.faults` Module
-==============================================================================
-.. automodule:: nova..api.openstack.faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst
deleted file mode 100644
index 0deb724de..000000000
--- a/doc/source/api/nova..api.openstack.flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.flavors` Module
-==============================================================================
-.. automodule:: nova..api.openstack.flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst
deleted file mode 100644
index 82bd5f1e8..000000000
--- a/doc/source/api/nova..api.openstack.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.images` Module
-==============================================================================
-.. automodule:: nova..api.openstack.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst
deleted file mode 100644
index c36856ea2..000000000
--- a/doc/source/api/nova..api.openstack.servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.servers` Module
-==============================================================================
-.. automodule:: nova..api.openstack.servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.shared_ip_groups.rst b/doc/source/api/nova..api.openstack.shared_ip_groups.rst
deleted file mode 100644
index 4b1f44efe..000000000
--- a/doc/source/api/nova..api.openstack.shared_ip_groups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.shared_ip_groups` Module
-==============================================================================
-.. automodule:: nova..api.openstack.shared_ip_groups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.zones.rst b/doc/source/api/nova..api.openstack.zones.rst
deleted file mode 100644
index ebe4569c5..000000000
--- a/doc/source/api/nova..api.openstack.zones.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.zones` Module
-==============================================================================
-.. automodule:: nova..api.openstack.zones
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst
deleted file mode 100644
index 7de68b6e0..000000000
--- a/doc/source/api/nova..auth.dbdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.dbdriver` Module
-==============================================================================
-.. automodule:: nova..auth.dbdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst
deleted file mode 100644
index ca8a3ad4d..000000000
--- a/doc/source/api/nova..auth.fakeldap.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.fakeldap` Module
-==============================================================================
-.. automodule:: nova..auth.fakeldap
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst
deleted file mode 100644
index c44463522..000000000
--- a/doc/source/api/nova..auth.ldapdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.ldapdriver` Module
-==============================================================================
-.. automodule:: nova..auth.ldapdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst
deleted file mode 100644
index bc5ce2ec3..000000000
--- a/doc/source/api/nova..auth.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.manager` Module
-==============================================================================
-.. automodule:: nova..auth.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst
deleted file mode 100644
index aad824ead..000000000
--- a/doc/source/api/nova..auth.signer.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.signer` Module
-==============================================================================
-.. automodule:: nova..auth.signer
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst
deleted file mode 100644
index 054aaf484..000000000
--- a/doc/source/api/nova..cloudpipe.pipelib.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..cloudpipe.pipelib` Module
-==============================================================================
-.. automodule:: nova..cloudpipe.pipelib
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.api.rst b/doc/source/api/nova..compute.api.rst
deleted file mode 100644
index caa66313a..000000000
--- a/doc/source/api/nova..compute.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.api` Module
-==============================================================================
-.. automodule:: nova..compute.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst
deleted file mode 100644
index d206ff3a4..000000000
--- a/doc/source/api/nova..compute.instance_types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.instance_types` Module
-==============================================================================
-.. automodule:: nova..compute.instance_types
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst
deleted file mode 100644
index 33a337c39..000000000
--- a/doc/source/api/nova..compute.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.manager` Module
-==============================================================================
-.. automodule:: nova..compute.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst
deleted file mode 100644
index 41b1080e5..000000000
--- a/doc/source/api/nova..compute.power_state.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.power_state` Module
-==============================================================================
-.. automodule:: nova..compute.power_state
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..console.api.rst b/doc/source/api/nova..console.api.rst
deleted file mode 100644
index 82a51d4c7..000000000
--- a/doc/source/api/nova..console.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..console.api` Module
-==============================================================================
-.. automodule:: nova..console.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..console.fake.rst b/doc/source/api/nova..console.fake.rst
deleted file mode 100644
index f053f85d6..000000000
--- a/doc/source/api/nova..console.fake.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..console.fake` Module
-==============================================================================
-.. automodule:: nova..console.fake
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..console.manager.rst b/doc/source/api/nova..console.manager.rst
deleted file mode 100644
index f9283a6c3..000000000
--- a/doc/source/api/nova..console.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..console.manager` Module
-==============================================================================
-.. automodule:: nova..console.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..console.xvp.rst b/doc/source/api/nova..console.xvp.rst
deleted file mode 100644
index a0887009e..000000000
--- a/doc/source/api/nova..console.xvp.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..console.xvp` Module
-==============================================================================
-.. automodule:: nova..console.xvp
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst
deleted file mode 100644
index 9de1adb24..000000000
--- a/doc/source/api/nova..context.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..context` Module
-==============================================================================
-.. automodule:: nova..context
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst
deleted file mode 100644
index af9f63634..000000000
--- a/doc/source/api/nova..crypto.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..crypto` Module
-==============================================================================
-.. automodule:: nova..crypto
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst
deleted file mode 100644
index 6d998fbb2..000000000
--- a/doc/source/api/nova..db.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.api` Module
-==============================================================================
-.. automodule:: nova..db.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.base.rst b/doc/source/api/nova..db.base.rst
deleted file mode 100644
index 29fb417d6..000000000
--- a/doc/source/api/nova..db.base.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.base` Module
-==============================================================================
-.. automodule:: nova..db.base
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.migration.rst b/doc/source/api/nova..db.migration.rst
deleted file mode 100644
index 71dfea301..000000000
--- a/doc/source/api/nova..db.migration.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.migration` Module
-==============================================================================
-.. automodule:: nova..db.migration
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst
deleted file mode 100644
index 76d0c1bd3..000000000
--- a/doc/source/api/nova..db.sqlalchemy.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.api` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
deleted file mode 100644
index 93decfb27..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.manage` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.manage
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
deleted file mode 100644
index 4b1219edb..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.001_austin` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.001_austin
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
deleted file mode 100644
index 82f1f4680..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.002_bexar` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.002_bexar
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
deleted file mode 100644
index 98f3e8da7..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
deleted file mode 100644
index 5cbb81191..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
deleted file mode 100644
index cef0c243e..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
deleted file mode 100644
index a15697196..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
deleted file mode 100644
index 38842d1af..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migration.rst b/doc/source/api/nova..db.sqlalchemy.migration.rst
deleted file mode 100644
index 3a9b01b9a..000000000
--- a/doc/source/api/nova..db.sqlalchemy.migration.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.migration` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.migration
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst
deleted file mode 100644
index 9c795d7f5..000000000
--- a/doc/source/api/nova..db.sqlalchemy.models.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.models` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.models
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst
deleted file mode 100644
index cbfd6416a..000000000
--- a/doc/source/api/nova..db.sqlalchemy.session.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.session` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.session
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst
deleted file mode 100644
index 97ac6b752..000000000
--- a/doc/source/api/nova..exception.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..exception` Module
-==============================================================================
-.. automodule:: nova..exception
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..fakememcache.rst b/doc/source/api/nova..fakememcache.rst
deleted file mode 100644
index 7e7ffb98b..000000000
--- a/doc/source/api/nova..fakememcache.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..fakememcache` Module
-==============================================================================
-.. automodule:: nova..fakememcache
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst
deleted file mode 100644
index f1e27c266..000000000
--- a/doc/source/api/nova..fakerabbit.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..fakerabbit` Module
-==============================================================================
-.. automodule:: nova..fakerabbit
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst
deleted file mode 100644
index 08165be44..000000000
--- a/doc/source/api/nova..flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..flags` Module
-==============================================================================
-.. automodule:: nova..flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.glance.rst b/doc/source/api/nova..image.glance.rst
deleted file mode 100644
index b0882d5ec..000000000
--- a/doc/source/api/nova..image.glance.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.glance` Module
-==============================================================================
-.. automodule:: nova..image.glance
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.local.rst b/doc/source/api/nova..image.local.rst
deleted file mode 100644
index b6ad5470b..000000000
--- a/doc/source/api/nova..image.local.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.local` Module
-==============================================================================
-.. automodule:: nova..image.local
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.s3.rst b/doc/source/api/nova..image.s3.rst
deleted file mode 100644
index e5b236127..000000000
--- a/doc/source/api/nova..image.s3.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.s3` Module
-==============================================================================
-.. automodule:: nova..image.s3
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst
deleted file mode 100644
index 78ef1ecca..000000000
--- a/doc/source/api/nova..image.service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.service` Module
-==============================================================================
-.. automodule:: nova..image.service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..log.rst b/doc/source/api/nova..log.rst
deleted file mode 100644
index ff209709f..000000000
--- a/doc/source/api/nova..log.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..log` Module
-==============================================================================
-.. automodule:: nova..log
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst
deleted file mode 100644
index 576902491..000000000
--- a/doc/source/api/nova..manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..manager` Module
-==============================================================================
-.. automodule:: nova..manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.api.rst b/doc/source/api/nova..network.api.rst
deleted file mode 100644
index b63be2ba3..000000000
--- a/doc/source/api/nova..network.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.api` Module
-==============================================================================
-.. automodule:: nova..network.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst
deleted file mode 100644
index 7af78d5ad..000000000
--- a/doc/source/api/nova..network.linux_net.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.linux_net` Module
-==============================================================================
-.. automodule:: nova..network.linux_net
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst
deleted file mode 100644
index 0ea705533..000000000
--- a/doc/source/api/nova..network.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.manager` Module
-==============================================================================
-.. automodule:: nova..network.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst
deleted file mode 100644
index 3bfdf639c..000000000
--- a/doc/source/api/nova..objectstore.bucket.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.bucket` Module
-==============================================================================
-.. automodule:: nova..objectstore.bucket
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst
deleted file mode 100644
index 0eb8c4efb..000000000
--- a/doc/source/api/nova..objectstore.handler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.handler` Module
-==============================================================================
-.. automodule:: nova..objectstore.handler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst
deleted file mode 100644
index fa4c971f1..000000000
--- a/doc/source/api/nova..objectstore.image.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.image` Module
-==============================================================================
-.. automodule:: nova..objectstore.image
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst
deleted file mode 100644
index 2b1d997a3..000000000
--- a/doc/source/api/nova..objectstore.stored.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.stored` Module
-==============================================================================
-.. automodule:: nova..objectstore.stored
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst
deleted file mode 100644
index 4140d95d6..000000000
--- a/doc/source/api/nova..quota.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..quota` Module
-==============================================================================
-.. automodule:: nova..quota
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst
deleted file mode 100644
index 5b2a9b8e2..000000000
--- a/doc/source/api/nova..rpc.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..rpc` Module
-==============================================================================
-.. automodule:: nova..rpc
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst
deleted file mode 100644
index 89c074c8f..000000000
--- a/doc/source/api/nova..scheduler.chance.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.chance` Module
-==============================================================================
-.. automodule:: nova..scheduler.chance
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst
deleted file mode 100644
index 793ed9c7b..000000000
--- a/doc/source/api/nova..scheduler.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.driver` Module
-==============================================================================
-.. automodule:: nova..scheduler.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst
deleted file mode 100644
index d0fc7c423..000000000
--- a/doc/source/api/nova..scheduler.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.manager` Module
-==============================================================================
-.. automodule:: nova..scheduler.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst
deleted file mode 100644
index dacc2cf30..000000000
--- a/doc/source/api/nova..scheduler.simple.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.simple` Module
-==============================================================================
-.. automodule:: nova..scheduler.simple
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.zone.rst b/doc/source/api/nova..scheduler.zone.rst
deleted file mode 100644
index 54c4bf201..000000000
--- a/doc/source/api/nova..scheduler.zone.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.zone` Module
-==============================================================================
-.. automodule:: nova..scheduler.zone
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst
deleted file mode 100644
index 2d2dfcf2e..000000000
--- a/doc/source/api/nova..service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..service` Module
-==============================================================================
-.. automodule:: nova..service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst
deleted file mode 100644
index a6bdb6f1f..000000000
--- a/doc/source/api/nova..test.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..test` Module
-==============================================================================
-.. automodule:: nova..test
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst
deleted file mode 100644
index 4a9ff5938..000000000
--- a/doc/source/api/nova..tests.api.openstack.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_adminapi.rst b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
deleted file mode 100644
index 19a85ca0f..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_adminapi` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_adminapi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst
deleted file mode 100644
index 68106d221..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_api` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst
deleted file mode 100644
index 9f0011669..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_auth` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_common.rst b/doc/source/api/nova..tests.api.openstack.test_common.rst
deleted file mode 100644
index 82f40ecb8..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_common.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_common` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_common
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst
deleted file mode 100644
index b839ae8a3..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_faults` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
deleted file mode 100644
index 471fac56e..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_flavors` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst
deleted file mode 100644
index 57ae93c8c..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_images` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
deleted file mode 100644
index 9a857f795..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_ratelimiting` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_ratelimiting
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst
deleted file mode 100644
index ea602e6ab..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_servers` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
deleted file mode 100644
index 48814af00..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_shared_ip_groups` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_shared_ip_groups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_zones.rst b/doc/source/api/nova..tests.api.openstack.test_zones.rst
deleted file mode 100644
index ba7078e63..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_zones.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_zones` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_zones
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst
deleted file mode 100644
index 8e79caa4d..000000000
--- a/doc/source/api/nova..tests.api.test_wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.test_wsgi` Module
-==============================================================================
-.. automodule:: nova..tests.api.test_wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.db.fakes.rst b/doc/source/api/nova..tests.db.fakes.rst
deleted file mode 100644
index cc79e55e2..000000000
--- a/doc/source/api/nova..tests.db.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.db.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.db.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst
deleted file mode 100644
index 524e72e91..000000000
--- a/doc/source/api/nova..tests.declare_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.declare_flags` Module
-==============================================================================
-.. automodule:: nova..tests.declare_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst
deleted file mode 100644
index a8dc3df36..000000000
--- a/doc/source/api/nova..tests.fake_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.fake_flags` Module
-==============================================================================
-.. automodule:: nova..tests.fake_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.glance.stubs.rst b/doc/source/api/nova..tests.glance.stubs.rst
deleted file mode 100644
index 7ef5fccbe..000000000
--- a/doc/source/api/nova..tests.glance.stubs.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.glance.stubs` Module
-==============================================================================
-.. automodule:: nova..tests.glance.stubs
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.hyperv_unittest.rst b/doc/source/api/nova..tests.hyperv_unittest.rst
deleted file mode 100644
index c08443121..000000000
--- a/doc/source/api/nova..tests.hyperv_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.hyperv_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.hyperv_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst
deleted file mode 100644
index 0ae252f04..000000000
--- a/doc/source/api/nova..tests.objectstore_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.objectstore_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.objectstore_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst
deleted file mode 100644
index e9c0d1abd..000000000
--- a/doc/source/api/nova..tests.real_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.real_flags` Module
-==============================================================================
-.. automodule:: nova..tests.real_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst
deleted file mode 100644
index 984e21199..000000000
--- a/doc/source/api/nova..tests.runtime_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.runtime_flags` Module
-==============================================================================
-.. automodule:: nova..tests.runtime_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_access.rst b/doc/source/api/nova..tests.test_access.rst
deleted file mode 100644
index 300d8109e..000000000
--- a/doc/source/api/nova..tests.test_access.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_access` Module
-==============================================================================
-.. automodule:: nova..tests.test_access
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_api.rst b/doc/source/api/nova..tests.test_api.rst
deleted file mode 100644
index f9473062e..000000000
--- a/doc/source/api/nova..tests.test_api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_api` Module
-==============================================================================
-.. automodule:: nova..tests.test_api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_auth.rst b/doc/source/api/nova..tests.test_auth.rst
deleted file mode 100644
index ff4445ae4..000000000
--- a/doc/source/api/nova..tests.test_auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_auth` Module
-==============================================================================
-.. automodule:: nova..tests.test_auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_cloud.rst b/doc/source/api/nova..tests.test_cloud.rst
deleted file mode 100644
index 7bd03db9a..000000000
--- a/doc/source/api/nova..tests.test_cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_cloud` Module
-==============================================================================
-.. automodule:: nova..tests.test_cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_compute.rst b/doc/source/api/nova..tests.test_compute.rst
deleted file mode 100644
index 90fd6e9d1..000000000
--- a/doc/source/api/nova..tests.test_compute.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_compute` Module
-==============================================================================
-.. automodule:: nova..tests.test_compute
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_console.rst b/doc/source/api/nova..tests.test_console.rst
deleted file mode 100644
index f695f5d17..000000000
--- a/doc/source/api/nova..tests.test_console.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_console` Module
-==============================================================================
-.. automodule:: nova..tests.test_console
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_direct.rst b/doc/source/api/nova..tests.test_direct.rst
deleted file mode 100644
index 4f7adef19..000000000
--- a/doc/source/api/nova..tests.test_direct.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_direct` Module
-==============================================================================
-.. automodule:: nova..tests.test_direct
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_flags.rst b/doc/source/api/nova..tests.test_flags.rst
deleted file mode 100644
index 2ec35d6c2..000000000
--- a/doc/source/api/nova..tests.test_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_flags` Module
-==============================================================================
-.. automodule:: nova..tests.test_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_instance_types.rst b/doc/source/api/nova..tests.test_instance_types.rst
deleted file mode 100644
index ebe689966..000000000
--- a/doc/source/api/nova..tests.test_instance_types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_instance_types` Module
-==============================================================================
-.. automodule:: nova..tests.test_instance_types
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_localization.rst b/doc/source/api/nova..tests.test_localization.rst
deleted file mode 100644
index d93c83ba7..000000000
--- a/doc/source/api/nova..tests.test_localization.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_localization` Module
-==============================================================================
-.. automodule:: nova..tests.test_localization
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_log.rst b/doc/source/api/nova..tests.test_log.rst
deleted file mode 100644
index 04ff5ead1..000000000
--- a/doc/source/api/nova..tests.test_log.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_log` Module
-==============================================================================
-.. automodule:: nova..tests.test_log
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_middleware.rst b/doc/source/api/nova..tests.test_middleware.rst
deleted file mode 100644
index 2f9df5832..000000000
--- a/doc/source/api/nova..tests.test_middleware.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_middleware` Module
-==============================================================================
-.. automodule:: nova..tests.test_middleware
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_misc.rst b/doc/source/api/nova..tests.test_misc.rst
deleted file mode 100644
index 4975f89d7..000000000
--- a/doc/source/api/nova..tests.test_misc.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_misc` Module
-==============================================================================
-.. automodule:: nova..tests.test_misc
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_network.rst b/doc/source/api/nova..tests.test_network.rst
deleted file mode 100644
index 3a4b04ea4..000000000
--- a/doc/source/api/nova..tests.test_network.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_network` Module
-==============================================================================
-.. automodule:: nova..tests.test_network
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_quota.rst b/doc/source/api/nova..tests.test_quota.rst
deleted file mode 100644
index 24ebf9ca3..000000000
--- a/doc/source/api/nova..tests.test_quota.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_quota` Module
-==============================================================================
-.. automodule:: nova..tests.test_quota
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_rpc.rst b/doc/source/api/nova..tests.test_rpc.rst
deleted file mode 100644
index c141d6889..000000000
--- a/doc/source/api/nova..tests.test_rpc.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_rpc` Module
-==============================================================================
-.. automodule:: nova..tests.test_rpc
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_scheduler.rst b/doc/source/api/nova..tests.test_scheduler.rst
deleted file mode 100644
index 1cd9991db..000000000
--- a/doc/source/api/nova..tests.test_scheduler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_scheduler` Module
-==============================================================================
-.. automodule:: nova..tests.test_scheduler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_service.rst b/doc/source/api/nova..tests.test_service.rst
deleted file mode 100644
index a264fbb55..000000000
--- a/doc/source/api/nova..tests.test_service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_service` Module
-==============================================================================
-.. automodule:: nova..tests.test_service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_test.rst b/doc/source/api/nova..tests.test_test.rst
deleted file mode 100644
index 389eb3c99..000000000
--- a/doc/source/api/nova..tests.test_test.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_test` Module
-==============================================================================
-.. automodule:: nova..tests.test_test
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_utils.rst b/doc/source/api/nova..tests.test_utils.rst
deleted file mode 100644
index d61a7021f..000000000
--- a/doc/source/api/nova..tests.test_utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_utils` Module
-==============================================================================
-.. automodule:: nova..tests.test_utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_virt.rst b/doc/source/api/nova..tests.test_virt.rst
deleted file mode 100644
index 9b0dc1e46..000000000
--- a/doc/source/api/nova..tests.test_virt.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_virt` Module
-==============================================================================
-.. automodule:: nova..tests.test_virt
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_volume.rst b/doc/source/api/nova..tests.test_volume.rst
deleted file mode 100644
index b5affe53c..000000000
--- a/doc/source/api/nova..tests.test_volume.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_volume` Module
-==============================================================================
-.. automodule:: nova..tests.test_volume
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_xenapi.rst b/doc/source/api/nova..tests.test_xenapi.rst
deleted file mode 100644
index 7128baee4..000000000
--- a/doc/source/api/nova..tests.test_xenapi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_xenapi` Module
-==============================================================================
-.. automodule:: nova..tests.test_xenapi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.xenapi.stubs.rst b/doc/source/api/nova..tests.xenapi.stubs.rst
deleted file mode 100644
index 356eed9a7..000000000
--- a/doc/source/api/nova..tests.xenapi.stubs.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.xenapi.stubs` Module
-==============================================================================
-.. automodule:: nova..tests.xenapi.stubs
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst
deleted file mode 100644
index 1131d1080..000000000
--- a/doc/source/api/nova..utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..utils` Module
-==============================================================================
-.. automodule:: nova..utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..version.rst b/doc/source/api/nova..version.rst
deleted file mode 100644
index 4b0fc078f..000000000
--- a/doc/source/api/nova..version.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..version` Module
-==============================================================================
-.. automodule:: nova..version
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst
deleted file mode 100644
index caf766765..000000000
--- a/doc/source/api/nova..virt.connection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.connection` Module
-==============================================================================
-.. automodule:: nova..virt.connection
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.disk.rst b/doc/source/api/nova..virt.disk.rst
deleted file mode 100644
index 4a6c0f406..000000000
--- a/doc/source/api/nova..virt.disk.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.disk` Module
-==============================================================================
-.. automodule:: nova..virt.disk
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst
deleted file mode 100644
index 06ecdbf7d..000000000
--- a/doc/source/api/nova..virt.fake.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.fake` Module
-==============================================================================
-.. automodule:: nova..virt.fake
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.hyperv.rst b/doc/source/api/nova..virt.hyperv.rst
deleted file mode 100644
index 48d89378e..000000000
--- a/doc/source/api/nova..virt.hyperv.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.hyperv` Module
-==============================================================================
-.. automodule:: nova..virt.hyperv
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst
deleted file mode 100644
index 4fdeb7af8..000000000
--- a/doc/source/api/nova..virt.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.images` Module
-==============================================================================
-.. automodule:: nova..virt.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst
deleted file mode 100644
index 7fb8aed5f..000000000
--- a/doc/source/api/nova..virt.libvirt_conn.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.libvirt_conn` Module
-==============================================================================
-.. automodule:: nova..virt.libvirt_conn
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.fake.rst b/doc/source/api/nova..virt.xenapi.fake.rst
deleted file mode 100644
index 752dabb14..000000000
--- a/doc/source/api/nova..virt.xenapi.fake.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.fake` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.fake
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.network_utils.rst b/doc/source/api/nova..virt.xenapi.network_utils.rst
deleted file mode 100644
index 15f52973e..000000000
--- a/doc/source/api/nova..virt.xenapi.network_utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.network_utils` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.network_utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vm_utils.rst b/doc/source/api/nova..virt.xenapi.vm_utils.rst
deleted file mode 100644
index 18745dc71..000000000
--- a/doc/source/api/nova..virt.xenapi.vm_utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.vm_utils` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.vm_utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vmops.rst b/doc/source/api/nova..virt.xenapi.vmops.rst
deleted file mode 100644
index 30662c58d..000000000
--- a/doc/source/api/nova..virt.xenapi.vmops.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.vmops` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.vmops
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volume_utils.rst b/doc/source/api/nova..virt.xenapi.volume_utils.rst
deleted file mode 100644
index 413e4dc4b..000000000
--- a/doc/source/api/nova..virt.xenapi.volume_utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.volume_utils` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.volume_utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volumeops.rst b/doc/source/api/nova..virt.xenapi.volumeops.rst
deleted file mode 100644
index 626f164df..000000000
--- a/doc/source/api/nova..virt.xenapi.volumeops.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi.volumeops` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi.volumeops
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi_conn.rst b/doc/source/api/nova..virt.xenapi_conn.rst
deleted file mode 100644
index 14ac5147f..000000000
--- a/doc/source/api/nova..virt.xenapi_conn.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi_conn` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi_conn
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.api.rst b/doc/source/api/nova..volume.api.rst
deleted file mode 100644
index 8ad36e049..000000000
--- a/doc/source/api/nova..volume.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.api` Module
-==============================================================================
-.. automodule:: nova..volume.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst
deleted file mode 100644
index 51f5c0729..000000000
--- a/doc/source/api/nova..volume.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.driver` Module
-==============================================================================
-.. automodule:: nova..volume.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst
deleted file mode 100644
index 91a192a8f..000000000
--- a/doc/source/api/nova..volume.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.manager` Module
-==============================================================================
-.. automodule:: nova..volume.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.san.rst b/doc/source/api/nova..volume.san.rst
deleted file mode 100644
index 1de068928..000000000
--- a/doc/source/api/nova..volume.san.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.san` Module
-==============================================================================
-.. automodule:: nova..volume.san
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst
deleted file mode 100644
index 0bff1c332..000000000
--- a/doc/source/api/nova..wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..wsgi` Module
-==============================================================================
-.. automodule:: nova..wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 859d4e331..6a22b0e74 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -35,6 +35,7 @@ Background Concepts for Nova
.. toctree::
:maxdepth: 3
+ threading
distributed_scheduler
multinic
zone
@@ -56,6 +57,7 @@ Module Reference
services
database
volume
+ xensmvolume
compute
network
auth
diff --git a/doc/source/devref/threading.rst b/doc/source/devref/threading.rst
new file mode 100644
index 000000000..e499f47e1
--- /dev/null
+++ b/doc/source/devref/threading.rst
@@ -0,0 +1,17 @@
+Threading model
+===============
+
+All OpenStack services use the *green thread* model of threading, implemented
+with the Python `eventlet <http://eventlet.net/>`_ and
+`greenlet <http://packages.python.org/greenlet/>`_ libraries.
+
+Green threads use a cooperative model of threading: thread context
+switches can only occur when specific eventlet or greenlet library calls are
+made (e.g., sleep, certain I/O calls). From the operating system's point of
+view, each OpenStack service runs in a single thread.
+
+The use of green threads reduces the likelihood of race conditions, but does
+not completely eliminate them. In some cases, you may need to use the
+``@utils.synchronized(...)`` decorator to avoid races.
+
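+A minimal usage sketch (an assumption based on the ``synchronized``
+decorator in ``nova/utils.py``, which takes a lock name)::
+
+    from nova import utils
+
+    @utils.synchronized('instance-update')
+    def update_shared_state(instance_id):
+        # Only one green thread at a time may run this body for the
+        # 'instance-update' lock; other callers yield until it is free.
+        pass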
diff --git a/doc/source/devref/xensmvolume.rst b/doc/source/devref/xensmvolume.rst
new file mode 100644
index 000000000..8f14c06f8
--- /dev/null
+++ b/doc/source/devref/xensmvolume.rst
@@ -0,0 +1,88 @@
+Xen Storage Manager Volume Driver
+=================================
+
+The Xen Storage Manager (xensm) driver for Nova-Volume is based on XenAPI Storage Manager. It provides basic storage functionality (volume creation and destruction) on a number of different storage back-ends, such as NetApp and NFS, and also enables the use of more sophisticated back-ends for operations like cloning and snapshotting. To give an idea of the benefits of using XenAPI SM for back-end storage services, the list below shows some of the storage plugins already supported in XenServer/XCP:
+
+- NFS VHD: SR plugin which stores disks as VHD files on a remote NFS filesystem
+- Local VHD on LVM: SR plugin which represents disks as VHD disks on Logical Volumes within a locally-attached Volume Group
+- HBA LUN-per-VDI driver: SR plugin which represents LUNs as VDIs sourced by hardware HBA adapters, e.g. hardware-based iSCSI or FC support
+- NetApp: SR driver for mapping of LUNs to VDIs on a NetApp server, providing use of fast snapshot and clone features on the filer
+- LVHD over FC: SR plugin which represents disks as VHDs on Logical Volumes within a Volume Group created on an HBA LUN, e.g. hardware-based iSCSI or FC support
+- iSCSI: Base ISCSI SR driver, provides a LUN-per-VDI. Does not support creation of VDIs but accesses existing LUNs on a target.
+- LVHD over iSCSI: SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN
+- EqualLogic: SR driver for mapping of LUNs to VDIs on an EqualLogic array group, providing use of fast snapshot and clone features on the array
+
+Glossary
+=========
+
+ XenServer: Commercial, supported product from Citrix
+
+ Xen Cloud Platform (XCP): Open-source equivalent of XenServer (and the development project for the toolstack). Everything said about XenServer below applies equally to XCP
+
+ XenAPI: The management API exposed by XenServer and XCP
+
+ xapi: The primary daemon on XenServer and Xen Cloud Platform; the one that exposes the XenAPI
+
+
+Design
+=======
+
+Definitions
+-----------
+
+Backend: A term for a particular storage backend. This could be iSCSI, NFS, NetApp, etc.
+Backend-config: All the parameters required to connect to a specific backend. For example, for NFS, this would be the server, path, etc.
+Flavor: This term is equivalent to volume "types"; a user-friendly term to specify some notion of quality of service. For example, "gold" might mean that the volumes will use a backend where backups are possible.
+
+A flavor can be associated with multiple backends. The volume scheduler, with the help of the driver, will decide which backend will be used to create a volume of a particular flavor. Currently, the driver uses a simple "first-fit" policy, where the first backend that can successfully create this volume is the one that is used.
+
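+A rough sketch of that policy (illustrative only; the names here are
+hypothetical, not the driver's actual internals)::
+
+    def first_fit_create(volume, backends):
+        """Return the first backend that successfully creates the volume."""
+        for backend in backends:
+            try:
+                backend.create_volume(volume)
+                return backend
+            except Exception:
+                # This backend cannot satisfy the request; try the next one.
+                continue
+        raise RuntimeError('no backend could create the volume')
+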
+Operation
+----------
+
+Using the nova-manage commands detailed below, an admin can add flavors and backends.
+
+One or more nova-volume service instances will be deployed per availability zone. When an instance is started, it will create storage repositories (SRs) to connect to the backends available within that zone. All nova-volume instances within a zone can see all the available backends. These instances are completely symmetric and hence should be able to service any create_volume request within the zone.
+
+
+Commands
+=========
+
+A category called "sm" has been added to nova-manage in the class StorageManagerCommands.
+
+The following actions will be added:
+
+- flavor_list
+- flavor_create
+- flavor_delete
+- backend_list
+- backend_add
+- backend_remove
+
+Usage:
+------
+
+nova-manage sm flavor_create <label> <description>
+
+nova-manage sm flavor_delete <label>
+
+nova-manage sm backend_add <flavor label> <SR type> [config connection parameters]
+
+Note: SR type and config connection parameters are in keeping with the Xen Command Line Interface. http://support.citrix.com/article/CTX124887
+
+nova-manage sm backend_remove <backend-id>
+
+Examples:
+---------
+
+nova-manage sm flavor_create gold "Not all that glitters"
+
+nova-manage sm flavor_delete gold
+
+nova-manage sm backend_add gold nfs name_label=toybox-renuka server=myserver serverpath=/local/scratch/myname
+
+nova-manage sm backend_remove 1
+
+API Changes
+===========
+
+No API changes have been introduced so far. The existing euca-create-volume and euca-delete-volume commands (or equivalent OpenStack API commands) should be used.
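+
+For example (euca2ools syntax; the volume id below is hypothetical)::
+
+    euca-create-volume -s 1 -z nova
+    euca-delete-volume vol-00000001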
diff --git a/doc/source/vmwareapi_readme.rst b/doc/source/vmwareapi_readme.rst
index 85f2694c0..5aea91a8a 100644
--- a/doc/source/vmwareapi_readme.rst
+++ b/doc/source/vmwareapi_readme.rst
@@ -104,6 +104,14 @@ Note:- Due to a faulty wsdl being shipped with ESX vSphere 4.1 we need a working
* Set the flag "--vmwareapi_wsdl_loc" with url, "http://<WEB SERVER>/vimService.wsdl"
+Debug flag
+----------
+
+suds logging is very verbose and turned off by default. If you need to
+debug the VMware API calls, change the default_log_levels flag appropriately.
+
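+For example, to raise the suds level (the flag value here is sketched from
+the defaults in ``nova/log.py``)::
+
+    --default_log_levels="amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=DEBUG,eventlet.wsgi.server=WARN"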
+
VLAN Network Manager
--------------------
VLAN network support is added through a custom network driver in the nova-compute node i.e "nova.network.vmwareapi_net" and it uses a Physical ethernet adapter on the VMware ESX/ESXi host for VLAN Networking (the name of the ethernet adapter is specified as vlan_interface flag in the nova-compute configuration flag) in the nova-compute node.
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 216f27b86..af175990f 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -18,20 +18,23 @@ use = egg:Paste#urlmap
/2009-04-04: ec2metadata
[pipeline:ec2cloud]
-pipeline = logrequest ec2noauth cloudrequest authorizer ec2executor
+pipeline = ec2faultwrap logrequest ec2noauth cloudrequest authorizer ec2executor
# NOTE(vish): use the following pipeline for deprecated auth
-#pipeline = logrequest authenticate cloudrequest authorizer ec2executor
+#pipeline = ec2faultwrap logrequest authenticate cloudrequest authorizer ec2executor
[pipeline:ec2admin]
-pipeline = logrequest ec2noauth adminrequest authorizer ec2executor
+pipeline = ec2faultwrap logrequest ec2noauth adminrequest authorizer ec2executor
# NOTE(vish): use the following pipeline for deprecated auth
-#pipeline = logrequest authenticate adminrequest authorizer ec2executor
+#pipeline = ec2faultwrap logrequest authenticate adminrequest authorizer ec2executor
[pipeline:ec2metadata]
-pipeline = logrequest ec2md
+pipeline = ec2faultwrap logrequest ec2md
[pipeline:ec2versions]
-pipeline = logrequest ec2ver
+pipeline = ec2faultwrap logrequest ec2ver
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
diff --git a/nova/api/direct.py b/nova/api/direct.py
index fdd2943d2..5e6826f82 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -254,10 +254,10 @@ class Reflection(object):
class ServiceWrapper(object):
- """Wrapper to dynamically povide a WSGI controller for arbitrary objects.
+ """Wrapper to dynamically provide a WSGI controller for arbitrary objects.
With lightweight introspection allows public methods on the object to
- be accesed via simple WSGI routing and parameters and serializes the
+ be accessed via simple WSGI routing and parameters and serializes the
return values.
Automatically used by nova.api.direct.Router to wrap registered instances.
@@ -297,14 +297,14 @@ class ServiceWrapper(object):
}[content_type]
return serializer.serialize(result)
except Exception, e:
- raise exception.Error(_("Returned non-serializeable type: %s")
+ raise exception.Error(_("Returned non-serializable type: %s")
% result)
class Limited(object):
__notdoc = """Limit the available methods on a given object.
- (Not a docstring so that the docstring can be conditionally overriden.)
+ (Not a docstring so that the docstring can be conditionally overridden.)
Useful when defining a public API that only exposes a subset of an
internal API.
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 4b4c0f536..db92ca053 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -36,6 +36,7 @@ from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
+from nova.api.ec2 import faults
from nova.auth import manager
FLAGS = flags.FLAGS
@@ -49,6 +50,19 @@ flags.DEFINE_integer('lockout_window', 15,
flags.DECLARE('use_forwarded_for', 'nova.api.auth')
+## Fault Wrapper around all EC2 requests ##
+class FaultWrapper(wsgi.Middleware):
+ """Calls the middleware stack, captures any exceptions into faults."""
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ try:
+ return req.get_response(self.application)
+ except Exception as ex:
+ LOG.exception(_("FaultWrapper: %s"), unicode(ex))
+ return faults.Fault(webob.exc.HTTPInternalServerError())
+
+
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@@ -95,7 +109,7 @@ class Lockout(wsgi.Middleware):
z = lockout_attempts flag
Uses memcached if lockout_memcached_servers flag is set, otherwise it
- uses a very simple in-proccess cache. Due to the simplicity of
+ uses a very simple in-process cache. Due to the simplicity of
the implementation, the timeout window is started with the first
failed request, so it will block if there are x failed logins within
that period.
@@ -391,6 +405,10 @@ class Executor(wsgi.Application):
LOG.info(_('NotAuthorized raised: %s'), unicode(ex),
context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
+ except exception.InvalidRequest as ex:
+ LOG.debug(_('InvalidRequest raised: %s'), unicode(ex),
+ context=context)
+ return self._error(req, context, type(ex).__name__, unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 9a3e55925..61b5ba3a5 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -24,10 +24,14 @@ import datetime
# TODO(termie): replace minidom with etree
from xml.dom import minidom
+from nova import flags
from nova import log as logging
+from nova import exception
from nova.api.ec2 import ec2utils
+from nova.api.ec2.admin import AdminController
LOG = logging.getLogger("nova.api.request")
+FLAGS = flags.FLAGS
def _underscore_to_camelcase(str):
@@ -53,6 +57,14 @@ class APIRequest(object):
def invoke(self, context):
try:
+ # Raise InvalidRequest for admin-specific requests if the
+ # allow_ec2_admin_api flag is set to false in nova.conf
+ if (isinstance(self.controller, AdminController) and
+ (not FLAGS.allow_ec2_admin_api)):
+ ## Raise InvalidRequest exception for EC2 Admin interface ##
+ LOG.exception("Unsupported API request")
+ raise exception.InvalidRequest()
+
method = getattr(self.controller,
ec2utils.camelcase_to_underscore(self.action))
except AttributeError:
@@ -63,7 +75,7 @@ class APIRequest(object):
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
- raise Exception(_error)
+ raise exception.InvalidRequest()
args = ec2utils.dict_from_dotted_str(self.args.items())
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 16ea74025..de9b9e660 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -951,8 +951,8 @@ class CloudController(object):
else:
ec2_id = instance_id
instance_id = ec2utils.ec2_id_to_id(ec2_id)
- output = self.compute_api.get_console_output(
- context, instance_id=instance_id)
+ instance = self.compute_api.get(context, instance_id)
+ output = self.compute_api.get_console_output(context, instance)
now = utils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
@@ -961,15 +961,15 @@ class CloudController(object):
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
instance_id = ec2utils.ec2_id_to_id(ec2_id)
- return self.compute_api.get_ajax_console(context,
- instance_id=instance_id)
+ instance = self.compute_api.get(context, instance_id)
+ return self.compute_api.get_ajax_console(context, instance)
def get_vnc_console(self, context, instance_id, **kwargs):
"""Returns vnc browser url. Used by OS dashboard."""
ec2_id = instance_id
instance_id = ec2utils.ec2_id_to_id(ec2_id)
- return self.compute_api.get_vnc_console(context,
- instance_id=instance_id)
+ instance = self.compute_api.get(context, instance_id)
+ return self.compute_api.get_vnc_console(context, instance)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
@@ -1437,10 +1437,7 @@ class CloudController(object):
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
- block_device_mapping=kwargs.get('block_device_mapping', {}),
- # NOTE(comstud): Unfortunately, EC2 requires that the
- # instance DB entries have been created..
- wait_for_instances=True)
+ block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context, resv_id)
def _do_instance(self, action, context, ec2_id):
@@ -1455,27 +1452,39 @@ class CloudController(object):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
- self._do_instances(self.compute_api.delete, context, instance_id)
+ for ec2_id in instance_id:
+ _instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, _instance_id)
+ self.compute_api.delete(context, instance)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
- self._do_instances(self.compute_api.reboot, context, instance_id)
+ for ec2_id in instance_id:
+ _instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, _instance_id)
+ self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids"""
LOG.debug(_("Going to stop instances"))
- self._do_instances(self.compute_api.stop, context, instance_id)
+ for ec2_id in instance_id:
+ _instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, _instance_id)
+ self.compute_api.stop(context, instance)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids"""
LOG.debug(_("Going to start instances"))
- self._do_instances(self.compute_api.start, context, instance_id)
+ for ec2_id in instance_id:
+ _instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context, _instance_id)
+ self.compute_api.start(context, instance)
return True
def rescue_instance(self, context, instance_id, **kwargs):
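All of the EC2 controller changes above share one shape: translate the EC2 id to the internal id, fetch the instance dict once, then pass the dict (not the raw id) to the compute API. A minimal sketch of that shared pattern, assuming a helper like this could stand in for the removed _do_instances (the helper name is hypothetical, not part of this patch):

def _get_then_act(compute_api, method, context, ec2_ids, *args):
    # Translate each EC2-style id, fetch the instance dict, and hand
    # the dict rather than the id to the compute API method.
    for ec2_id in ec2_ids:
        internal_id = ec2utils.ec2_id_to_id(ec2_id)
        instance = compute_api.get(context, internal_id)
        method(context, instance, *args)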
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 514dabe48..ee68edbd7 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -63,7 +63,7 @@ def _try_convert(value):
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
- 0xN, -0xN int from hex (postitive) (N is any number)
+ 0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
@@ -108,7 +108,7 @@ def _try_convert(value):
def dict_from_dotted_str(items):
"""parse multi dot-separated argument into dict.
- EBS boot uses multi dot-separeted arguments like
+ EBS boot uses multi dot-separated arguments like
BlockDeviceMapping.1.DeviceName=snap-id
Convert the above into
{'block_device_mapping': {'1': {'device_name': snap-id}}}
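For illustration, a self-contained sketch of the dotted-key parsing described in this docstring; it is an approximation only, since the real helper also converts the CamelCase keys to underscore form (as the example output above shows):

def dotted_to_nested(items):
    # items is a sequence of (key, value) pairs such as
    # ('BlockDeviceMapping.1.DeviceName', 'snap-id').
    result = {}
    for key, value in items:
        parts = key.split('.')
        node = result
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return result

# dotted_to_nested([('BlockDeviceMapping.1.DeviceName', 'snap-id')])
# => {'BlockDeviceMapping': {'1': {'DeviceName': 'snap-id'}}}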
diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py
new file mode 100644
index 000000000..9e47702d9
--- /dev/null
+++ b/nova/api/ec2/faults.py
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob.dec
+import webob.exc
+
+from nova import utils
+from nova import context
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+
+class Fault(webob.exc.HTTPException):
+ """Captures exception and return REST Response."""
+
+ def __init__(self, exception):
+ """Create a response for the given webob.exc.exception."""
+ self.wrapped_exc = exception
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """Generate a WSGI response based on the exception passed to ctor."""
+ code = self.wrapped_exc.status_int
+ message = self.wrapped_exc.explanation
+
+ if code == 501:
+ message = "The requested function is not supported"
+ code = str(code)
+
+ if 'AWSAccessKeyId' not in req.params:
+ raise webob.exc.HTTPBadRequest()
+ user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
+ project_id = project_id or user_id
+ remote_address = getattr(req, 'remote_address', '127.0.0.1')
+ if FLAGS.use_forwarded_for:
+ remote_address = req.headers.get('X-Forwarded-For', remote_address)
+
+ ctxt = context.RequestContext(user_id,
+ project_id,
+ remote_address=remote_address)
+
+ resp = webob.Response()
+ resp.status = self.wrapped_exc.status_int
+ resp.headers['Content-Type'] = 'text/xml'
+ resp.body = str('<?xml version="1.0"?>\n'
+ '<Response><Errors><Error><Code>%s</Code>'
+ '<Message>%s</Message></Error></Errors>'
+ '<RequestID>%s</RequestID></Response>' %
+ (utils.utf8(code), utils.utf8(message),
+ utils.utf8(ctxt.request_id)))
+
+ return resp
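A hedged usage sketch for the Fault class added above (request values invented; assumes nova's flags have been parsed so FLAGS.use_forwarded_for resolves):

import webob
import webob.exc

from nova.api.ec2 import faults

# Requests rendered through Fault must carry AWSAccessKeyId, which the
# handler splits into user and project ids.
req = webob.Request.blank('/?AWSAccessKeyId=user:project')
fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='bad input'))
resp = req.get_response(fault)
# resp.status => '400 Bad Request'; resp.body is the EC2-style XML
# <Response><Errors><Error>...</Error></Errors>... document built in
# Fault.__call__ above.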
diff --git a/nova/api/manager.py b/nova/api/manager.py
index b3fcf9352..9cb364e2d 100644
--- a/nova/api/manager.py
+++ b/nova/api/manager.py
@@ -36,7 +36,7 @@ class EC2Manager(manager.Manager):
def init_host(self):
"""Perform any initialization.
- Currently, we only add an iptables filter rule for the metadta
+ Currently, we only add an iptables filter rule for the metadata
service.
"""
self.network_driver.metadata_accept()
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 0f3f1fff7..3b82d358f 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -46,6 +46,7 @@ _STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
+ task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
},
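A small sketch of how this table resolves an externally visible status (the lookup helper here is illustrative, not this module's API):

def status_from_states(vm_state, task_state):
    # Fall back to the per-vm_state 'default' entry when the task
    # state has no specific mapping.
    states = _STATE_MAP.get(vm_state, {})
    return states.get(task_state, states.get('default'))

# status_from_states(vm_states.ACTIVE, task_states.REBOOTING_HARD)
# => 'HARD_REBOOT'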
@@ -287,7 +288,7 @@ def get_networks_for_instance(context, instance):
"""Returns a prepared nw_info list for passing into the view
builders
- We end up with a datastructure like:
+ We end up with a data structure like:
{'public': {'ips': [{'addr': '10.0.0.1', 'version': 4},
{'addr': '2001::1', 'version': 6}],
'floating_ips': [{'addr': '172.16.0.1', 'version': 4},
diff --git a/nova/api/openstack/contrib/admin_actions.py b/nova/api/openstack/contrib/admin_actions.py
index f501e9a41..8ed0827b8 100644
--- a/nova/api/openstack/contrib/admin_actions.py
+++ b/nova/api/openstack/contrib/admin_actions.py
@@ -23,9 +23,7 @@ from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
-from nova import utils
from nova.api.openstack import extensions
-from nova.api.openstack import faults
from nova.scheduler import api as scheduler_api
@@ -54,7 +52,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
"""Permit Admins to pause the server"""
ctxt = req.environ['nova.context']
try:
- self.compute_api.pause(ctxt, id)
+ server = self.compute_api.get(ctxt, id)
+ self.compute_api.pause(ctxt, server)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::pause %s"), readable)
@@ -68,7 +67,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
"""Permit Admins to unpause the server"""
ctxt = req.environ['nova.context']
try:
- self.compute_api.unpause(ctxt, id)
+ server = self.compute_api.get(ctxt, id)
+ self.compute_api.unpause(ctxt, server)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::unpause %s"), readable)
@@ -82,7 +82,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
"""Permit admins to suspend the server"""
context = req.environ['nova.context']
try:
- self.compute_api.suspend(context, id)
+ server = self.compute_api.get(context, id)
+ self.compute_api.suspend(context, server)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("compute.api::suspend %s"), readable)
@@ -96,7 +97,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
"""Permit admins to resume the server from suspend"""
context = req.environ['nova.context']
try:
- self.compute_api.resume(context, id)
+ server = self.compute_api.get(context, id)
+ self.compute_api.resume(context, server)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("compute.api::resume %s"), readable)
diff --git a/nova/api/openstack/contrib/createserverext.py b/nova/api/openstack/contrib/createserverext.py
index da95164e8..ab5037304 100644
--- a/nova/api/openstack/contrib/createserverext.py
+++ b/nova/api/openstack/contrib/createserverext.py
@@ -21,10 +21,12 @@ from nova.api.openstack import wsgi
class CreateServerController(servers.Controller):
- def _build_view(self, req, instance, is_detail=False):
- server = super(CreateServerController, self)._build_view(req,
- instance,
- is_detail)
+ def _build_view(self, req, instance, is_detail=False, is_create=False):
+ server = super(CreateServerController, self).\
+ _build_view(req,
+ instance,
+ is_detail=is_detail,
+ is_create=is_create)
if is_detail:
self._build_security_groups(server['server'], instance)
return server
diff --git a/nova/api/openstack/contrib/deferred_delete.py b/nova/api/openstack/contrib/deferred_delete.py
index 58223e901..013acbbea 100644
--- a/nova/api/openstack/contrib/deferred_delete.py
+++ b/nova/api/openstack/contrib/deferred_delete.py
@@ -16,14 +16,10 @@
"""The deferred instance delete extension."""
import webob
-from webob import exc
from nova import compute
-from nova import exception
from nova import log as logging
-from nova.api.openstack import common
from nova.api.openstack import extensions
-from nova.api.openstack import faults
from nova.api.openstack import servers
@@ -53,7 +49,8 @@ class Deferred_delete(extensions.ExtensionDescriptor):
"""Force delete of instance before deferred cleanup."""
context = req.environ["nova.context"]
- self.compute_api.force_delete(context, instance_id)
+ instance = self.compute_api.get(context, instance_id)
+ self.compute_api.force_delete(context, instance)
return webob.Response(status_int=202)
def get_actions(self):
diff --git a/nova/api/openstack/contrib/diskconfig.py b/nova/api/openstack/contrib/diskconfig.py
index c0ba70363..2688c5237 100644
--- a/nova/api/openstack/contrib/diskconfig.py
+++ b/nova/api/openstack/contrib/diskconfig.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License
-import json
from webob import exc
import webob
@@ -23,8 +22,6 @@ from nova import compute
from nova import exception
import nova.image
from nova import log as logging
-from nova import network
-from nova import rpc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
diff --git a/nova/api/openstack/contrib/extended_status.py b/nova/api/openstack/contrib/extended_status.py
new file mode 100644
index 000000000..625f3ab2b
--- /dev/null
+++ b/nova/api/openstack/contrib/extended_status.py
@@ -0,0 +1,110 @@
+# Copyright 2011 Openstack, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The Extended Status Admin API extension."""
+
+import traceback
+
+import webob
+from webob import exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import xmlutil
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.api.openstack.contrib.extendedstatus")
+
+
+class Extended_status(extensions.ExtensionDescriptor):
+ """Extended Status support"""
+
+ name = "ExtendedStatus"
+ alias = "OS-EXT-STS"
+ namespace = "http://docs.openstack.org/ext/extended_status/api/v1.1"
+ updated = "2011-11-03T00:00:00+00:00"
+
+ def get_request_extensions(self):
+ request_extensions = []
+
+ def _get_and_extend_one(context, server_id, body):
+ compute_api = compute.API()
+ try:
+ inst_ref = compute_api.routing_get(context, server_id)
+ except exception.NotFound:
+ explanation = _("Server not found.")
+ raise exc.HTTPNotFound(explanation=explanation)
+
+ for state in ['task_state', 'vm_state', 'power_state']:
+ key = "%s:%s" % (Extended_status.alias, state)
+ body['server'][key] = inst_ref[state]
+
+ def _get_and_extend_all(context, body):
+ # TODO(mdietz): This is a brilliant argument for this to *not*
+ # be an extension. The problem is we either have to 1) duplicate
+ # the logic from the servers controller or 2) do what we did
+ # and iterate over the (potentially sorted, limited, filtered)
+ # list of elements and look each one up individually.
+ compute_api = compute.API()
+
+ for server in body['servers']:
+ try:
+ inst_ref = compute_api.routing_get(context, server['id'])
+ except exception.NotFound:
+ explanation = _("Server not found.")
+ raise exc.HTTPNotFound(explanation=explanation)
+
+ for state in ['task_state', 'vm_state', 'power_state']:
+ key = "%s:%s" % (Extended_status.alias, state)
+ server[key] = inst_ref[state]
+
+ def _extended_status_handler(req, res, body):
+ context = req.environ['nova.context']
+ server_id = req.environ['wsgiorg.routing_args'][1].get('id')
+
+ if 'nova.template' in req.environ:
+ tmpl = req.environ['nova.template']
+ tmpl.attach(ExtendedStatusTemplate())
+
+ if server_id:
+ _get_and_extend_one(context, server_id, body)
+ else:
+ _get_and_extend_all(context, body)
+ return res
+
+ if FLAGS.allow_admin_api:
+ req_ext = extensions.RequestExtension('GET',
+ '/:(project_id)/servers/:(id)',
+ _extended_status_handler)
+ request_extensions.append(req_ext)
+
+ return request_extensions
+
+
+class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server')
+ root.set('{%s}task_state' % Extended_status.namespace,
+ '%s:task_state' % Extended_status.alias)
+ root.set('{%s}power_state' % Extended_status.namespace,
+ '%s:power_state' % Extended_status.alias)
+ root.set('{%s}vm_state' % Extended_status.namespace,
+ '%s:vm_state' % Extended_status.alias)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Extended_status.alias: Extended_status.namespace})
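For illustration, the request extension above decorates each server body with alias-prefixed keys; a minimal sketch of the resulting shape (instance values invented):

body = {'server': {'id': 'fake-uuid', 'name': 'vm1'}}
inst_ref = {'task_state': None, 'vm_state': 'active', 'power_state': 1}
for state in ['task_state', 'vm_state', 'power_state']:
    body['server']['OS-EXT-STS:%s' % state] = inst_ref[state]
# body['server'] now carries OS-EXT-STS:task_state, OS-EXT-STS:vm_state
# and OS-EXT-STS:power_state alongside the regular fields.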
diff --git a/nova/api/openstack/contrib/flavorextraspecs.py b/nova/api/openstack/contrib/flavorextraspecs.py
index 8a756f04a..d71e9cf8b 100644
--- a/nova/api/openstack/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/contrib/flavorextraspecs.py
@@ -20,7 +20,7 @@
from webob import exc
from nova import db
-from nova import quota
+from nova import exception
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
@@ -53,7 +53,7 @@ class FlavorExtraSpecsController(object):
db.instance_type_extra_specs_update_or_create(context,
flavor_id,
specs)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
return body
@@ -70,7 +70,7 @@ class FlavorExtraSpecsController(object):
db.instance_type_extra_specs_update_or_create(context,
flavor_id,
body)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
return body
diff --git a/nova/api/openstack/contrib/multinic.py b/nova/api/openstack/contrib/multinic.py
index 85ff423aa..26968646d 100644
--- a/nova/api/openstack/contrib/multinic.py
+++ b/nova/api/openstack/contrib/multinic.py
@@ -15,12 +15,13 @@
"""The multinic extension."""
-from webob import exc
import webob
+from webob import exc
+from nova.api.openstack import extensions
from nova import compute
+from nova import exception
from nova import log as logging
-from nova.api.openstack import extensions
LOG = logging.getLogger("nova.api.multinic")
@@ -63,6 +64,13 @@ class Multinic(extensions.ExtensionDescriptor):
return actions
+ def _get_instance(self, context, instance_id):
+ try:
+ return self.compute_api.get(context, instance_id)
+ except exception.InstanceNotFound:
+ msg = _("Server not found")
+ raise exc.HTTPNotFound(msg)
+
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
@@ -71,10 +79,10 @@ class Multinic(extensions.ExtensionDescriptor):
msg = _("Missing 'networkId' argument for addFixedIp")
raise exc.HTTPUnprocessableEntity(explanation=msg)
- # Add the fixed IP
+ context = req.environ['nova.context']
+ instance = self._get_instance(context, id)
network_id = input_dict['addFixedIp']['networkId']
- self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
- network_id)
+ self.compute_api.add_fixed_ip(context, instance, network_id)
return webob.Response(status_int=202)
def _remove_fixed_ip(self, input_dict, req, id):
@@ -85,11 +93,12 @@ class Multinic(extensions.ExtensionDescriptor):
msg = _("Missing 'address' argument for removeFixedIp")
raise exc.HTTPUnprocessableEntity(explanation=msg)
- # Remove the fixed IP
+ context = req.environ['nova.context']
+ instance = self._get_instance(context, id)
address = input_dict['removeFixedIp']['address']
+
try:
- self.compute_api.remove_fixed_ip(req.environ['nova.context'], id,
- address)
+ self.compute_api.remove_fixed_ip(context, instance, address)
except exceptions.FixedIpNotFoundForSpecificInstance:
LOG.exception(_("Unable to find address %r") % address)
raise exc.HTTPBadRequest()
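Hedged examples of the action bodies these two handlers expect (network id and address invented):

# POST /servers/<id>/action with one of:
add_body = {'addFixedIp': {'networkId': 1}}
remove_body = {'removeFixedIp': {'address': '10.0.0.5'}}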
diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py
index 83d75394a..184b4d12f 100644
--- a/nova/api/openstack/contrib/quotas.py
+++ b/nova/api/openstack/contrib/quotas.py
@@ -49,7 +49,7 @@ class QuotaSetsController(object):
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
except exception.NotAuthorized:
- return webob.Response(status_int=403)
+ raise webob.exc.HTTPForbidden()
def update(self, req, id, body):
context = req.environ['nova.context']
@@ -65,7 +65,7 @@ class QuotaSetsController(object):
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
- return webob.Response(status_int=403)
+ raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
def defaults(self, req, id):
diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py
index 4e1beb0ba..d3f38b200 100644
--- a/nova/api/openstack/contrib/rescue.py
+++ b/nova/api/openstack/contrib/rescue.py
@@ -17,12 +17,13 @@
import webob
from webob import exc
+from nova.api.openstack import extensions as exts
+from nova.api.openstack import faults
from nova import compute
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from nova.api.openstack import extensions as exts
-from nova.api.openstack import faults
FLAGS = flags.FLAGS
@@ -41,24 +42,33 @@ class Rescue(exts.ExtensionDescriptor):
super(Rescue, self).__init__(ext_mgr)
self.compute_api = compute.API()
+ def _get_instance(self, context, instance_id):
+ try:
+ return self.compute_api.get(context, instance_id)
+ except exception.InstanceNotFound:
+ msg = _("Server not found")
+ raise exc.HTTPNotFound(msg)
+
@exts.wrap_errors
def _rescue(self, input_dict, req, instance_id):
"""Rescue an instance."""
context = req.environ["nova.context"]
+
if input_dict['rescue'] and 'adminPass' in input_dict['rescue']:
password = input_dict['rescue']['adminPass']
else:
password = utils.generate_password(FLAGS.password_length)
- self.compute_api.rescue(context, instance_id, rescue_password=password)
+ instance = self._get_instance(context, instance_id)
+ self.compute_api.rescue(context, instance, rescue_password=password)
return {'adminPass': password}
@exts.wrap_errors
def _unrescue(self, input_dict, req, instance_id):
"""Unrescue an instance."""
context = req.environ["nova.context"]
- self.compute_api.unrescue(context, instance_id)
-
+ instance = self._get_instance(context, instance_id)
+ self.compute_api.unrescue(context, instance)
return webob.Response(status_int=202)
def get_actions(self):
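A sketch of the rescue round trip implied by the handlers above (password invented; when adminPass is omitted, one is generated with FLAGS.password_length):

# POST /servers/<id>/action
rescue_body = {'rescue': {'adminPass': 's3cr3t'}}
# response body: {'adminPass': 's3cr3t'}
unrescue_body = {'unrescue': None}  # value is ignored; returns a bare 202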
diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py
index 662711951..9072a34ee 100644
--- a/nova/api/openstack/contrib/security_groups.py
+++ b/nova/api/openstack/contrib/security_groups.py
@@ -251,7 +251,7 @@ class SecurityGroupRulesController(SecurityGroupController):
cidr=None, group_id=None):
values = {}
- if group_id:
+ if group_id is not None:
try:
parent_group_id = int(parent_group_id)
group_id = int(group_id)
diff --git a/nova/api/openstack/contrib/volumetypes.py b/nova/api/openstack/contrib/volumetypes.py
index d63104a29..64a9e0f02 100644
--- a/nova/api/openstack/contrib/volumetypes.py
+++ b/nova/api/openstack/contrib/volumetypes.py
@@ -21,7 +21,6 @@ from webob import exc
from nova import db
from nova import exception
-from nova import quota
from nova.volume import volume_types
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
@@ -55,7 +54,7 @@ class VolumeTypesController(object):
try:
volume_types.create(context, name, specs)
vol_type = volume_types.get_volume_type_by_name(context, name)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
except exception.NotFound:
raise exc.HTTPNotFound()
@@ -118,7 +117,7 @@ class VolumeTypeExtraSpecsController(object):
db.volume_type_extra_specs_update_or_create(context,
vol_type_id,
specs)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
return body
@@ -135,7 +134,7 @@ class VolumeTypeExtraSpecsController(object):
db.volume_type_extra_specs_update_or_create(context,
vol_type_id,
body)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
return body
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 382404309..670d54def 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -92,7 +92,7 @@ class ExtensionDescriptor(object):
return actions
def get_request_extensions(self):
- """List of extensions.RequestException extension objects.
+ """List of extensions.RequestExtension extension objects.
Request extensions are used to handle custom request data.
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 0ef863eed..83381d755 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -24,7 +24,7 @@ from nova.api.openstack import wsgi
class Fault(webob.exc.HTTPException):
- """An RS API fault response."""
+ """Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
@@ -73,6 +73,9 @@ class Fault(webob.exc.HTTPException):
return self.wrapped_exc
+ def __str__(self):
+ return self.wrapped_exc.__str__()
+
class OverLimitFault(webob.exc.HTTPException):
"""
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index afc900cce..7280ecbce 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -39,9 +39,6 @@ class Controller(object):
items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
- def _get_view_builder(self, req):
- raise NotImplementedError()
-
def _get_flavors(self, req, is_detail=True):
"""Helper function that returns a list of flavor dicts."""
filters = {}
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index 0b549de2e..56e839526 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -109,7 +109,7 @@ def create_resource():
class Limit(object):
"""
- Stores information about a limit for HTTP requets.
+ Stores information about a limit for HTTP requests.
"""
UNITS = {
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index d764075b7..4145898c1 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -21,7 +21,6 @@ from nova import compute
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
-from nova import quota
class Controller(object):
@@ -33,7 +32,8 @@ class Controller(object):
def _get_metadata(self, context, server_id):
try:
- meta = self.compute_api.get_instance_metadata(context, server_id)
+ server = self.compute_api.get(context, server_id)
+ meta = self.compute_api.get_instance_metadata(context, server)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
@@ -107,8 +107,9 @@ class Controller(object):
def _update_instance_metadata(self, context, server_id, metadata,
delete=False):
try:
+ server = self.compute_api.get(context, server_id)
return self.compute_api.update_instance_metadata(context,
- server_id,
+ server,
metadata,
delete)
@@ -120,7 +121,7 @@ class Controller(object):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
- except quota.QuotaError as error:
+ except exception.QuotaError as error:
self._handle_quota_error(error)
def show(self, req, server_id, id):
@@ -147,7 +148,8 @@ class Controller(object):
raise exc.HTTPNotFound(explanation=msg)
try:
- self.compute_api.delete_instance_metadata(context, server_id, id)
+ server = self.compute_api.get(context, server_id)
+ self.compute_api.delete_instance_metadata(context, server, id)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 29160c5ce..b3c4fbce9 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -40,7 +40,6 @@ from nova import exception
from nova import flags
from nova import image
from nova import log as logging
-from nova import quota
from nova.rpc import common as rpc_common
from nova.scheduler import api as scheduler_api
from nova import utils
@@ -87,13 +86,7 @@ class Controller(object):
def _get_block_device_mapping(self, data):
"""Get block_device_mapping from 'server' dictionary.
- Overidden by volumes controller.
- """
- return None
-
- def _get_block_device_mapping(self, data):
- """Get block_device_mapping from 'server' dictionary.
- Overidden by volumes controller.
+ Overridden by volumes controller.
"""
return None
@@ -148,6 +141,13 @@ class Controller(object):
limited_list = self._limit_items(instance_list, req)
return self._build_list(req, limited_list, is_detail=is_detail)
+ def _get_server(self, context, instance_uuid):
+ """Utility function for looking up an instance by uuid"""
+ try:
+ return self.compute_api.routing_get(context, instance_uuid)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
def _handle_quota_error(self, error):
"""
Reraise quota errors as api-specific http exceptions
@@ -191,45 +191,6 @@ class Controller(object):
msg = _("Server name is an empty string")
raise exc.HTTPBadRequest(explanation=msg)
- def _get_kernel_ramdisk_from_image(self, req, image_service, image_id):
- """Fetch an image from the ImageService, then if present, return the
- associated kernel and ramdisk image IDs.
- """
- context = req.environ['nova.context']
- image_meta = image_service.show(context, image_id)
- # NOTE(sirp): extracted to a separate method to aid unit-testing, the
- # new method doesn't need a request obj or an ImageService stub
- kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
- image_meta)
- return kernel_id, ramdisk_id
-
- @staticmethod
- def _do_get_kernel_ramdisk_from_image(image_meta):
- """Given an ImageService image_meta, return kernel and ramdisk image
- ids if present.
-
- This is only valid for `ami` style images.
- """
- image_id = image_meta['id']
- if image_meta['status'] != 'active':
- raise exception.ImageUnacceptable(image_id=image_id,
- reason=_("status is not active"))
-
- if image_meta.get('container_format') != 'ami':
- return None, None
-
- try:
- kernel_id = image_meta['properties']['kernel_id']
- except KeyError:
- raise exception.KernelNotFoundForImage(image_id=image_id)
-
- try:
- ramdisk_id = image_meta['properties']['ramdisk_id']
- except KeyError:
- ramdisk_id = None
-
- return kernel_id, ramdisk_id
-
def _get_injected_files(self, personality):
"""
Create a list of injected files from the personality attribute
@@ -363,17 +324,6 @@ class Controller(object):
if str(image_href).startswith(req.application_url):
image_href = image_href.split('/').pop()
- try:
- image_service, image_id = image.get_image_service(context,
- image_href)
- kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
- req, image_service, image_id)
- images = set([str(x['id']) for x in image_service.index(context)])
- assert str(image_id) in images
- except Exception, e:
- msg = _("Cannot find requested image %(image_href)s: %(e)s" %
- locals())
- raise exc.HTTPBadRequest(explanation=msg)
personality = server_dict.get('personality')
config_drive = server_dict.get('config_drive')
@@ -442,9 +392,7 @@ class Controller(object):
(instances, resv_id) = self.compute_api.create(context,
inst_type,
- image_id,
- kernel_id=kernel_id,
- ramdisk_id=ramdisk_id,
+ image_href,
display_name=name,
display_description=name,
key_name=key_name,
@@ -462,9 +410,8 @@ class Controller(object):
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
- block_device_mapping=block_device_mapping,
- wait_for_instances=not ret_resv_id)
- except quota.QuotaError as error:
+ block_device_mapping=block_device_mapping)
+ except exception.QuotaError as error:
self._handle_quota_error(error)
except exception.InstanceTypeMemoryTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
@@ -497,7 +444,7 @@ class Controller(object):
instance['instance_type'] = inst_type
instance['image_ref'] = image_href
- server = self._build_view(req, instance, is_detail=True)
+ server = self._build_view(req, instance, is_create=True)
if '_is_precooked' in server['server']:
del server['server']['_is_precooked']
else:
@@ -505,10 +452,11 @@ class Controller(object):
return server
def _delete(self, context, id):
+ instance = self._get_server(context, id)
if FLAGS.reclaim_instance_interval:
- self.compute_api.soft_delete(context, id)
+ self.compute_api.soft_delete(context, instance)
else:
- self.compute_api.delete(context, id)
+ self.compute_api.delete(context, instance)
@scheduler_api.redirect_handler
def update(self, req, id, body):
@@ -619,8 +567,10 @@ class Controller(object):
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
+ instance = self._get_server(context, instance_id)
+
image = self.compute_api.backup(context,
- instance_id,
+ instance,
image_name,
backup_type,
rotation,
@@ -635,8 +585,10 @@ class Controller(object):
return resp
def _action_confirm_resize(self, input_dict, req, id):
+ context = req.environ['nova.context']
+ instance = self._get_server(context, id)
try:
- self.compute_api.confirm_resize(req.environ['nova.context'], id)
+ self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
@@ -646,8 +598,10 @@ class Controller(object):
return exc.HTTPNoContent()
def _action_revert_resize(self, input_dict, req, id):
+ context = req.environ['nova.context']
+ instance = self._get_server(context, id)
try:
- self.compute_api.revert_resize(req.environ['nova.context'], id)
+ self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
@@ -668,9 +622,12 @@ class Controller(object):
msg = _("Missing argument 'type' for reboot")
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ['nova.context']
+ instance = self._get_server(context, id)
+
try:
- self.compute_api.reboot(req.environ['nova.context'], id,
- reboot_type)
+ self.compute_api.reboot(context, instance, reboot_type)
except Exception, e:
LOG.exception(_("Error in reboot %s"), e)
raise exc.HTTPUnprocessableEntity()
@@ -700,9 +657,10 @@ class Controller(object):
def _resize(self, req, instance_id, flavor_id):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
+ instance = self._get_server(context, instance_id)
try:
- self.compute_api.resize(context, instance_id, flavor_id)
+ self.compute_api.resize(context, instance, flavor_id)
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
@@ -748,7 +706,7 @@ class Controller(object):
return common.get_id_from_href(flavor_ref)
- def _build_view(self, req, instance, is_detail=False):
+ def _build_view(self, req, instance, is_detail=False, is_create=False):
context = req.environ['nova.context']
project_id = getattr(context, 'project_id', '')
base_url = req.application_url
@@ -757,7 +715,9 @@ class Controller(object):
addresses_builder = views_addresses.ViewBuilder()
builder = views_servers.ViewBuilder(context, addresses_builder,
flavor_builder, image_builder, base_url, project_id)
- return builder.build(instance, is_detail=is_detail)
+ return builder.build(instance,
+ is_detail=is_detail,
+ is_create=is_create)
def _build_list(self, req, instances, is_detail=False):
params = req.GET.copy()
@@ -786,7 +746,8 @@ class Controller(object):
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
- self.compute_api.set_admin_password(context, id, password)
+ server = self._get_server(context, id)
+ self.compute_api.set_admin_password(context, server, password)
return webob.Response(status_int=202)
def _limit_items(self, items, req):
@@ -816,6 +777,7 @@ class Controller(object):
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
+ instance = self._get_server(context, instance_id)
try:
image_href = info["rebuild"]["imageRef"]
@@ -841,7 +803,7 @@ class Controller(object):
password = utils.generate_password(FLAGS.password_length)
try:
- self.compute_api.rebuild(context, instance_id, image_href,
+ self.compute_api.rebuild(context, instance, image_href,
password, name=name, metadata=metadata,
files_to_inject=injected_files)
except exception.RebuildRequiresActiveInstance:
@@ -851,7 +813,7 @@ class Controller(object):
msg = _("Instance %s could not be found") % instance_id
raise exc.HTTPNotFound(explanation=msg)
- instance = self.compute_api.routing_get(context, instance_id)
+ instance = self._get_server(context, instance_id)
view = self._build_view(request, instance, is_detail=True)
view['server']['adminPass'] = password
@@ -886,9 +848,11 @@ class Controller(object):
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
+ instance = self._get_server(context, instance_id)
+
try:
image = self.compute_api.snapshot(context,
- instance_id,
+ instance,
image_name,
extra_properties=props)
except exception.InstanceBusy:
diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py
index 302212aa8..9e180640b 100644
--- a/nova/api/openstack/views/flavors.py
+++ b/nova/api/openstack/views/flavors.py
@@ -66,7 +66,6 @@ class ViewBuilder(object):
def _build_links(self, flavor_obj):
"""Generate a container of links that refer to the provided flavor."""
- print flavor_obj
href = self.generate_href(flavor_obj["id"])
bookmark = self.generate_bookmark(flavor_obj["id"])
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
index 9c30318a7..138a40cb8 100644
--- a/nova/api/openstack/views/limits.py
+++ b/nova/api/openstack/views/limits.py
@@ -16,7 +16,6 @@
# under the License.
import datetime
-import time
from nova.api.openstack import common
from nova import utils
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 288730efe..4a0be46c1 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -41,13 +41,15 @@ class ViewBuilder(object):
self.base_url = base_url
self.project_id = project_id
- def build(self, inst, is_detail=False):
+ def build(self, inst, is_detail=False, is_create=False):
"""Return a dict that represenst a server."""
if inst.get('_is_precooked', False):
server = dict(server=inst)
else:
if is_detail:
server = self._build_detail(inst)
+ elif is_create:
+ server = self._build_create(inst)
else:
server = self._build_simple(inst)
@@ -59,6 +61,12 @@ class ViewBuilder(object):
"""Return a simple model of a server."""
return dict(server=dict(id=inst['uuid'], name=inst['display_name']))
+ def _build_create(self, inst):
+ """Return data that should be returned from a server create."""
+ server = dict(server=dict(id=inst['uuid']))
+ self._build_links(server['server'], inst)
+ return server
+
def _build_detail(self, inst):
"""Returns a detailed model of a server."""
vm_state = inst.get('vm_state', vm_states.BUILDING)
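A sketch of what the new create view returns relative to the simple view; the link shapes are approximated from _build_links, and the uuid and base_url are invented:

def build_create_view(uuid, base_url):
    # Approximates _build_create: id plus links only, with no status
    # or address details, so a create response can be returned without
    # waiting for the scheduler to finish populating the instance.
    href = '%s/servers/%s' % (base_url, uuid)
    return {'server': {'id': uuid,
                       'links': [{'rel': 'self', 'href': href},
                                 {'rel': 'bookmark', 'href': href}]}}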
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index a3ca5cd5b..7deba6382 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -477,6 +477,7 @@ class ResponseSerializer(object):
request.environ['nova.action'] = action
if (hasattr(serializer, 'get_template') and
'nova.template' not in request.environ):
+
template = serializer.get_template(action)
request.environ['nova.template'] = template
else:
@@ -512,7 +513,6 @@ class LazySerializationMiddleware(wsgi.Middleware):
# Re-serialize the body
response.body = serializer.serialize(utils.loads(response.body),
**kwargs)
-
return response
@@ -527,6 +527,9 @@ class Resource(wsgi.Application):
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
+ Exceptions derived from webob.exc.HTTPException will be automatically
+ wrapped in faults.Fault() to provide API-friendly error responses.
+
"""
def __init__(self, controller, deserializer=None, serializer=None):
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index f1e769278..f576da8e7 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -114,7 +114,7 @@ def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
The characters &, |, and ! are supported in the query. No syntax checking
- is performed, so malformed querys will not work correctly.
+ is performed, so malformed queries will not work correctly.
"""
# cut off the parentheses
inner = query[1:-1]
@@ -171,7 +171,7 @@ def _match(key, value, attrs):
def _subs(value):
"""Returns a list of subclass strings.
- The strings represent the ldap objectclass plus any subclasses that
+ The strings represent the ldap object class plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index e0504464e..93b4244ad 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -99,7 +99,7 @@ class AuthBase(object):
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
- This allows methods to accept objects or ids as paramaters.
+ This allows methods to accept objects or ids as parameters.
"""
if isinstance(obj, cls):
return obj.id
@@ -692,7 +692,7 @@ class AuthManager(object):
@type admin: bool
@param admin: Whether to set the admin flag. The admin flag gives
- superuser status regardless of roles specifed for the user.
+ superuser status regardless of roles specified for the user.
@type create_project: bool
@param: Whether to create a project for the user with the same name.
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 436dc79b5..3a46e1421 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -45,6 +45,7 @@ LOG = logging.getLogger('nova.compute.api')
FLAGS = flags.FLAGS
+flags.DECLARE('enable_zone_routing', 'nova.scheduler.api')
flags.DECLARE('vncproxy_topic', 'nova.vnc')
flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
@@ -78,8 +79,9 @@ def generate_default_display_name(instance):
return 'Server %s' % instance['id']
-def _is_able_to_shutdown(instance, instance_id):
+def _is_able_to_shutdown(instance):
vm_state = instance["vm_state"]
+ instance_id = instance["id"]
valid_shutdown_states = [
vm_states.ACTIVE,
@@ -96,9 +98,10 @@ def _is_able_to_shutdown(instance, instance_id):
return True
-def _is_queued_delete(instance, instance_id):
+def _is_queued_delete(instance):
vm_state = instance["vm_state"]
task_state = instance["task_state"]
+ instance_id = instance["id"]
if vm_state != vm_states.SOFT_DELETE:
LOG.warn(_("Instance %(instance_id)s is not in a 'soft delete' "
@@ -136,15 +139,16 @@ class API(base.Base):
return
limit = quota.allowed_injected_files(context, len(injected_files))
if len(injected_files) > limit:
- raise quota.QuotaError(code="OnsetFileLimitExceeded")
+ raise exception.QuotaError(code="OnsetFileLimitExceeded")
path_limit = quota.allowed_injected_file_path_bytes(context)
for path, content in injected_files:
if len(path) > path_limit:
- raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
+ raise exception.QuotaError(code="OnsetFilePathLimitExceeded")
content_limit = quota.allowed_injected_file_content_bytes(
context, len(content))
if len(content) > content_limit:
- raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
+ raise exception.QuotaError(
+ code="OnsetFileContentLimitExceeded")
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
@@ -157,7 +161,7 @@ class API(base.Base):
msg = _("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals()
LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
+ raise exception.QuotaError(msg, "MetadataLimitExceeded")
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
@@ -168,7 +172,7 @@ class API(base.Base):
msg = _("Quota exceeded for %(pid)s, metadata property "
"key or value too long") % locals()
LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
+ raise exception.QuotaError(msg, "MetadataLimitExceeded")
def _check_requested_networks(self, context, requested_networks):
""" Check if the networks requested belongs to the project
@@ -189,8 +193,7 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id, access_ip_v4, access_ip_v6,
requested_networks, config_drive,
- block_device_mapping,
- wait_for_instances):
+ block_device_mapping, create_instance_here=False):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation."""
@@ -225,7 +228,7 @@ class API(base.Base):
else:
message = _("Instance quota exceeded. You can only run %s "
"more instances of this type.") % num_instances
- raise quota.QuotaError(message, "InstanceLimitExceeded")
+ raise exception.QuotaError(message, "InstanceLimitExceeded")
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files)
@@ -325,10 +328,18 @@ class API(base.Base):
LOG.debug(_("Going to run %s instances...") % num_instances)
- if wait_for_instances:
- rpc_method = rpc.call
- else:
+ if create_instance_here:
+ instance = self.create_db_entry_for_new_instance(
+ context, instance_type, image, base_options,
+ security_group, block_device_mapping)
+ # Tells scheduler we created the instance already.
+ base_options['id'] = instance['id']
rpc_method = rpc.cast
+ else:
+ # We need to wait for the scheduler to create the instance
+ # DB entries, because the instance *could* be created in
+ # a child zone.
+ rpc_method = rpc.call
# TODO(comstud): We should use rpc.multicall when we can
# retrieve the full instance dictionary from the scheduler.
@@ -344,6 +355,8 @@ class API(base.Base):
num_instances, requested_networks,
block_device_mapping, security_group)
+ if create_instance_here:
+ return ([instance], reservation_id)
return (instances, reservation_id)
@staticmethod
@@ -534,8 +547,7 @@ class API(base.Base):
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None, block_device_mapping=None,
access_ip_v4=None, access_ip_v6=None,
- requested_networks=None, config_drive=None,
- wait_for_instances=True):
+ requested_networks=None, config_drive=None):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
@@ -546,6 +558,13 @@ class API(base.Base):
we waited for information from the scheduler or not.
"""
+ # We can create the DB entry for the instance here if we're
+ # only going to create 1 instance and we're in a single
+ # zone deployment. This speeds up API responses for builds
+ # as we don't need to wait for the scheduler.
+ create_instance_here = (max_count == 1 and
+ not FLAGS.enable_zone_routing)
+
(instances, reservation_id) = self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
@@ -557,10 +576,9 @@ class API(base.Base):
reservation_id, access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
- wait_for_instances)
+ create_instance_here=create_instance_here)
- if instances is None:
- # wait_for_instances must have been False
+ if create_instance_here or instances is None:
return (instances, reservation_id)
inst_ret_list = []
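A minimal sketch of the dispatch decision made above (the helper name is hypothetical; rpc is the module this file already imports):

def pick_rpc_method(max_count, enable_zone_routing):
    # With a single instance and no zone routing, the API has already
    # created the DB row, so it can fire-and-forget (cast); otherwise
    # it must block (call) until the scheduler creates the entries,
    # possibly in a child zone.
    create_instance_here = (max_count == 1 and not enable_zone_routing)
    return rpc.cast if create_instance_here else rpc.call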
@@ -778,12 +796,12 @@ class API(base.Base):
raise
@scheduler_api.reroute_compute("soft_delete")
- def soft_delete(self, context, instance_id):
+ def soft_delete(self, context, instance):
"""Terminate an instance."""
+ instance_id = instance["id"]
LOG.debug(_("Going to try to soft delete %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'soft delete')
- if not _is_able_to_shutdown(instance, instance_id):
+ if not _is_able_to_shutdown(instance):
return
# NOTE(jerdfelt): The compute daemon handles reclaiming instances
@@ -792,7 +810,7 @@ class API(base.Base):
host = instance['host']
if host:
self.update(context,
- instance_id,
+ instance["id"],
vm_state=vm_states.SOFT_DELETE,
task_state=task_states.POWERING_OFF,
deleted_at=utils.utcnow())
@@ -801,7 +819,7 @@ class API(base.Base):
instance_id, host)
else:
LOG.warning(_("No host for instance %s, deleting immediately"),
- instance_id)
+ instance["id"])
self.db.instance_destroy(context, instance_id)
def _delete(self, context, instance):
@@ -818,12 +836,11 @@ class API(base.Base):
self.db.instance_destroy(context, instance['id'])
@scheduler_api.reroute_compute("delete")
- def delete(self, context, instance_id):
+ def delete(self, context, instance):
"""Terminate an instance."""
- LOG.debug(_("Going to try to terminate %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'delete')
+ LOG.debug(_("Going to try to terminate %s"), instance["id"])
- if not _is_able_to_shutdown(instance, instance_id):
+ if not _is_able_to_shutdown(instance):
return
self._delete(context, instance)
@@ -833,7 +850,7 @@ class API(base.Base):
"""Restore a previously deleted (but not reclaimed) instance."""
instance = self._get_instance(context, instance_id, 'restore')
- if not _is_queued_delete(instance, instance_id):
+ if not _is_queued_delete(instance):
return
self.update(context,
@@ -851,22 +868,21 @@ class API(base.Base):
instance_id, host)
@scheduler_api.reroute_compute("force_delete")
- def force_delete(self, context, instance_id):
+ def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
- instance = self._get_instance(context, instance_id, 'force delete')
- if not _is_queued_delete(instance, instance_id):
+ if not _is_queued_delete(instance):
return
self._delete(context, instance)
@scheduler_api.reroute_compute("stop")
- def stop(self, context, instance_id):
+ def stop(self, context, instance):
"""Stop an instance."""
+ instance_id = instance["id"]
LOG.debug(_("Going to try to stop %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'stopping')
- if not _is_able_to_shutdown(instance, instance_id):
+ if not _is_able_to_shutdown(instance):
return
self.update(context,
@@ -881,11 +897,11 @@ class API(base.Base):
self._cast_compute_message('stop_instance', context,
instance_id, host)
- def start(self, context, instance_id):
+ def start(self, context, instance):
"""Start an instance."""
- LOG.debug(_("Going to try to start %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'starting')
vm_state = instance["vm_state"]
+ instance_id = instance["id"]
+ LOG.debug(_("Going to try to start %s"), instance_id)
if vm_state != vm_states.STOPPED:
LOG.warning(_("Instance %(instance_id)s is not "
@@ -1095,41 +1111,41 @@ class API(base.Base):
% instance_id)
@scheduler_api.reroute_compute("backup")
- def backup(self, context, instance_id, name, backup_type, rotation,
+ def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
- :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
name = backup_type # daily backups are called 'daily'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- recv_meta = self._create_image(context, instance_id, name, 'backup',
+ recv_meta = self._create_image(context, instance, name, 'backup',
backup_type=backup_type, rotation=rotation,
extra_properties=extra_properties)
return recv_meta
@scheduler_api.reroute_compute("snapshot")
- def snapshot(self, context, instance_id, name, extra_properties=None):
+ def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
- :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
- return self._create_image(context, instance_id, name, 'snapshot',
+ return self._create_image(context, instance, name, 'snapshot',
extra_properties=extra_properties)
- def _create_image(self, context, instance_id, name, image_type,
+ def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
"""Create snapshot or backup for an instance on this host.
:param context: security context
- :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param backup_type: daily | weekly
@@ -1138,8 +1154,8 @@ class API(base.Base):
:param extra_properties: dict of extra image properties to include
"""
- instance = self.db.instance_get(context, instance_id)
task_state = instance["task_state"]
+ instance_id = instance['id']
if task_state == task_states.IMAGE_BACKUP:
raise exception.InstanceBackingUp(instance_id=instance_id)
@@ -1163,20 +1179,21 @@ class API(base.Base):
return recv_meta
@scheduler_api.reroute_compute("reboot")
- def reboot(self, context, instance_id, reboot_type):
+ def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
+ state = {'SOFT': task_states.REBOOTING,
+ 'HARD': task_states.REBOOTING_HARD}[reboot_type]
self.update(context,
- instance_id,
+ instance['id'],
vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING)
- self._cast_compute_message('reboot_instance', context, instance_id,
+ task_state=state)
+ self._cast_compute_message('reboot_instance', context, instance['id'],
params={'reboot_type': reboot_type})
@scheduler_api.reroute_compute("rebuild")
- def rebuild(self, context, instance_id, image_href, admin_password,
+ def rebuild(self, context, instance, image_href, admin_password,
name=None, metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
- instance = self.db.instance_get(context, instance_id)
name = name or instance["display_name"]
if instance["vm_state"] != vm_states.ACTIVE:
@@ -1190,7 +1207,7 @@ class API(base.Base):
self._check_metadata_properties_quota(context, metadata)
self.update(context,
- instance_id,
+ instance["id"],
metadata=metadata,
display_name=name,
image_ref=image_href,
@@ -1205,73 +1222,68 @@ class API(base.Base):
self._cast_compute_message('rebuild_instance',
context,
- instance_id,
+ instance["id"],
params=rebuild_params)
@scheduler_api.reroute_compute("revert_resize")
- def revert_resize(self, context, instance_id):
+ def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
- instance_ref = self._get_instance(context, instance_id,
- 'revert_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_ref['uuid'], 'finished')
+ instance['uuid'], 'finished')
if not migration_ref:
- raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
- status='finished')
+ raise exception.MigrationNotFoundByStatus(
+ instance_id=instance['id'], status='finished')
self.update(context,
- instance_id,
+ instance['id'],
vm_state=vm_states.ACTIVE,
task_state=None)
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context,
- instance_ref['uuid'],
+ instance['uuid'],
migration_ref['dest_compute'],
params=params)
self.db.migration_update(context, migration_ref['id'],
- {'status': 'reverted'})
+ {'status': 'reverted'})
@scheduler_api.reroute_compute("confirm_resize")
- def confirm_resize(self, context, instance_id):
+ def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
- instance_ref = self._get_instance(context, instance_id,
- 'confirm_resize')
migration_ref = self.db.migration_get_by_instance_and_status(context,
- instance_ref['uuid'], 'finished')
+ instance['uuid'], 'finished')
if not migration_ref:
- raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
- status='finished')
+ raise exception.MigrationNotFoundByStatus(
+ instance_id=instance['id'], status='finished')
self.update(context,
- instance_id,
+ instance['id'],
vm_state=vm_states.ACTIVE,
task_state=None)
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context,
- instance_ref['uuid'],
+ instance['uuid'],
migration_ref['source_compute'],
params=params)
self.db.migration_update(context, migration_ref['id'],
{'status': 'confirmed'})
- self.db.instance_update(context, instance_id,
+ self.db.instance_update(context, instance['id'],
{'host': migration_ref['dest_compute'], })
@scheduler_api.reroute_compute("resize")
- def resize(self, context, instance_id, flavor_id=None):
+ def resize(self, context, instance, flavor_id=None):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- instance_ref = self._get_instance(context, instance_id, 'resize')
- current_instance_type = instance_ref['instance_type']
+ current_instance_type = instance['instance_type']
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
@@ -1297,29 +1309,38 @@ class API(base.Base):
raise exception.CannotResizeToSameSize()
self.update(context,
- instance_id,
+ instance['id'],
vm_state=vm_states.RESIZING,
task_state=task_states.RESIZE_PREP)
- instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_ref['uuid'],
+ "instance_id": instance['uuid'],
+ "update_db": False,
"instance_type_id": new_instance_type['id']}})
@scheduler_api.reroute_compute("add_fixed_ip")
- def add_fixed_ip(self, context, instance_id, network_id):
+ def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
- self._cast_compute_message('add_fixed_ip_to_instance', context,
+ #NOTE(bcwaldon): We need to use the integer id since the
+ # network manager doesn't support uuids
+ instance_id = instance['id']
+ self._cast_compute_message('add_fixed_ip_to_instance',
+ context,
instance_id,
params=dict(network_id=network_id))
@scheduler_api.reroute_compute("remove_fixed_ip")
- def remove_fixed_ip(self, context, instance_id, address):
+ def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
- self._cast_compute_message('remove_fixed_ip_from_instance', context,
- instance_id, params=dict(address=address))
+ #NOTE(bcwaldon): We need to use the integer id since the
+ # network manager doesn't support uuids
+ instance_id = instance['id']
+ self._cast_compute_message('remove_fixed_ip_from_instance',
+ context,
+ instance_id,
+ params=dict(address=address))
#TODO(tr3buchet): how to run this in the correct zone?
def add_network_to_project(self, context, project_id):
@@ -1332,8 +1353,9 @@ class API(base.Base):
self.network_api.add_network_to_project(context, project_id)
@scheduler_api.reroute_compute("pause")
- def pause(self, context, instance_id):
+ def pause(self, context, instance):
"""Pause the given instance."""
+ instance_id = instance["id"]
self.update(context,
instance_id,
vm_state=vm_states.ACTIVE,
@@ -1341,8 +1363,9 @@ class API(base.Base):
self._cast_compute_message('pause_instance', context, instance_id)
@scheduler_api.reroute_compute("unpause")
- def unpause(self, context, instance_id):
+ def unpause(self, context, instance):
"""Unpause the given instance."""
+ instance_id = instance["id"]
self.update(context,
instance_id,
vm_state=vm_states.PAUSED,
@@ -1377,8 +1400,9 @@ class API(base.Base):
return self.db.instance_get_actions(context, instance_id)
@scheduler_api.reroute_compute("suspend")
- def suspend(self, context, instance_id):
+ def suspend(self, context, instance):
"""Suspend the given instance."""
+ instance_id = instance["id"]
self.update(context,
instance_id,
vm_state=vm_states.ACTIVE,
@@ -1386,8 +1410,9 @@ class API(base.Base):
self._cast_compute_message('suspend_instance', context, instance_id)
@scheduler_api.reroute_compute("resume")
- def resume(self, context, instance_id):
+ def resume(self, context, instance):
"""Resume the given instance."""
+ instance_id = instance["id"]
self.update(context,
instance_id,
vm_state=vm_states.SUSPENDED,
@@ -1395,8 +1420,9 @@ class API(base.Base):
self._cast_compute_message('resume_instance', context, instance_id)
@scheduler_api.reroute_compute("rescue")
- def rescue(self, context, instance_id, rescue_password=None):
+ def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
+ instance_id = instance['uuid']
self.update(context,
instance_id,
vm_state=vm_states.ACTIVE,
@@ -1409,8 +1435,9 @@ class API(base.Base):
params=rescue_params)
@scheduler_api.reroute_compute("unrescue")
- def unrescue(self, context, instance_id):
+ def unrescue(self, context, instance):
"""Unrescue the given instance."""
+ instance_id = instance['uuid']
self.update(context,
instance_id,
vm_state=vm_states.RESCUED,
@@ -1418,8 +1445,15 @@ class API(base.Base):
self._cast_compute_message('unrescue_instance', context, instance_id)
@scheduler_api.reroute_compute("set_admin_password")
- def set_admin_password(self, context, instance_id, password=None):
+ def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
+ #NOTE(bcwaldon): we need to use the integer id here since manager uses
+ # db.instance_get, not db.instance_get_by_uuid
+ instance_id = instance['id']
+ self.update(context,
+ instance_id,
+ task_state=task_states.UPDATING_PASSWORD)
+
host = self._find_host(context, instance_id)
rpc.cast(context,
@@ -1427,15 +1461,19 @@ class API(base.Base):
{"method": "set_admin_password",
"args": {"instance_id": instance_id, "new_pass": password}})
- def inject_file(self, context, instance_id):
+ @scheduler_api.reroute_compute("inject_file")
+ def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
- self._cast_compute_message('inject_file', context, instance_id)
+ instance_id = instance['id']
+ params = {'path': path, 'file_contents': file_contents}
+ self._cast_compute_message('inject_file', context,
+ instance_id, params=params)
- def get_ajax_console(self, context, instance_id):
+ def get_ajax_console(self, context, instance):
"""Get a url to an AJAX Console."""
output = self._call_compute_message('get_ajax_console',
context,
- instance_id)
+ instance['id'])
rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
@@ -1443,27 +1481,27 @@ class API(base.Base):
return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
- def get_vnc_console(self, context, instance_id):
+ def get_vnc_console(self, context, instance):
"""Get a url to a VNC Console."""
- instance = self.get(context, instance_id)
output = self._call_compute_message('get_vnc_console',
context,
- instance_id)
+ instance['id'])
rpc.call(context, '%s' % FLAGS.vncproxy_topic,
{'method': 'authorize_vnc_console',
'args': {'token': output['token'],
'host': output['host'],
'port': output['port']}})
- # hostignore and portignore are compatability params for noVNC
+ # hostignore and portignore are compatibility params for noVNC
return {'url': '%s/vnc_auto.html?token=%s&host=%s&port=%s' % (
FLAGS.vncproxy_url,
output['token'],
'hostignore',
'portignore')}
- def get_console_output(self, context, instance_id):
+ def get_console_output(self, context, instance):
"""Get console output for an an instance."""
+ instance_id = instance['id']
return self._call_compute_message('get_console_output',
context,
instance_id)
@@ -1527,7 +1565,8 @@ class API(base.Base):
# TODO(tr3buchet): currently network_info doesn't contain floating IPs
# in its info, if this changes, the next few lines will need to
- # accomodate the info containing floating as well as fixed ip addresses
+ # accommodate the info containing floating as well as fixed ip
+ # addresses
fixed_ip_addrs = []
for info in self.network_api.get_instance_nw_info(context,
instance):
@@ -1548,24 +1587,16 @@ class API(base.Base):
floating_address=address,
fixed_address=fixed_ip_addrs[0])
- def _get_native_instance_id(self, context, instance_id):
- """If an instance id is a UUID, convert it to a native ID."""
- if utils.is_uuid_like(instance_id):
- instance_id = self.get(context, instance_id)['id']
- return instance_id
-
- def get_instance_metadata(self, context, instance_id):
+ def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
- instance_id = self._get_native_instance_id(context, instance_id)
- rv = self.db.instance_metadata_get(context, instance_id)
+ rv = self.db.instance_metadata_get(context, instance['id'])
return dict(rv.iteritems())
- def delete_instance_metadata(self, context, instance_id, key):
+ def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
- instance_id = self._get_native_instance_id(context, instance_id)
- self.db.instance_metadata_delete(context, instance_id, key)
+ self.db.instance_metadata_delete(context, instance['id'], key)
- def update_instance_metadata(self, context, instance_id,
+ def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
@@ -1573,14 +1604,13 @@ class API(base.Base):
`metadata` argument will be deleted.
"""
- instance_id = self._get_native_instance_id(context, instance_id)
-
if delete:
_metadata = metadata
else:
- _metadata = self.get_instance_metadata(context, instance_id)
+ _metadata = self.get_instance_metadata(context, instance)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
- self.db.instance_metadata_update(context, instance_id, _metadata, True)
+ self.db.instance_metadata_update(context, instance['id'],
+ _metadata, True)
return _metadata
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b1e75cd9a..4927e747d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -33,6 +33,7 @@ terminating it.
"""
+import contextlib
import datetime
import functools
import os
@@ -126,13 +127,18 @@ def checks_instance_lock(function):
return decorated_function
+def _get_image_meta(context, image_ref):
+ image_service, image_id = nova.image.get_image_service(context, image_ref)
+ return image_service.show(context, image_id)
+
+
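A minimal usage sketch of the new module-level helper, assuming an admin
context and a made-up image_ref; get_image_service() resolves either a
bare id or a full href:

    from nova import context as nova_context

    ctxt = nova_context.get_admin_context()
    image_meta = _get_image_meta(ctxt, 'ami-00000001')  # hypothetical ref
    print image_meta['id'], image_meta.get('size')
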
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
- # and redocument the module docstring
+ # and re-document the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
@@ -309,7 +315,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
- def _check_image_size():
+ def _check_image_size(image_meta):
"""Ensure image is smaller than the maximum size allowed by the
instance_type.
@@ -324,13 +330,6 @@ class ComputeManager(manager.SchedulerDependentManager):
image, but is accurate because it reflects the image's
actual size.
"""
- # NOTE(jk0): image_ref is defined in the DB model, image_href is
- # used by the image service. This should be refactored to be
- # consistent.
- image_href = instance['image_ref']
- image_service, image_id = nova.image.get_image_service(context,
- image_href)
- image_meta = image_service.show(context, image_id)
try:
size_bytes = image_meta['size']
@@ -354,6 +353,7 @@ class ComputeManager(manager.SchedulerDependentManager):
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+ image_id = image_meta['id']
LOG.debug(_("image_id=%(image_id)s, image_size_bytes="
"%(size_bytes)d, allowed_size_bytes="
"%(allowed_size_bytes)d") % locals())
@@ -401,6 +401,18 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.deallocate_for_instance(context,
instance)
+ def _cleanup():
+ with utils.save_and_reraise_exception():
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
+ if network_info is not None:
+ _deallocate_network()
+
+ def _error_message(instance_id, message):
+ return _("Instance '%(instance_id)s' "
+ "failed %(message)s.") % locals()
+
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
@@ -409,7 +421,9 @@ class ComputeManager(manager.SchedulerDependentManager):
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
- _check_image_size()
+ image_meta = _get_image_meta(context, instance['image_ref'])
+
+ _check_image_size(image_meta)
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
@@ -423,14 +437,19 @@ class ComputeManager(manager.SchedulerDependentManager):
instance['admin_pass'] = kwargs.get('admin_password', None)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
- network_info = _make_network_info()
try:
+ network_info = None
+ with utils.logging_error(_error_message(instance_id,
+ "network setup")):
+ network_info = _make_network_info()
+
self._instance_update(context,
instance_id,
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
-
- block_device_info = _make_block_device_info()
+ with utils.logging_error(_error_message(instance_id,
+ "block device setup")):
+ block_device_info = _make_block_device_info()
self._instance_update(context,
instance_id,
@@ -438,25 +457,18 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=task_states.SPAWNING)
# TODO(vish) check to make sure the availability zone matches
- try:
- self.driver.spawn(context, instance,
+ with utils.logging_error(_error_message(instance_id,
+ "failed to spawn")):
+ self.driver.spawn(context, instance, image_meta,
network_info, block_device_info)
- except Exception as error: # pylint: disable=W0702
- LOG.exception(_("Instance '%(instance_id)s' failed to spawn. "
- "Details: %(error)s") % locals())
- self._instance_update(context,
- instance_id,
- vm_state=vm_states.ERROR)
- _deallocate_network()
- return
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance_id,
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- launched_at=utils.utcnow())
+ instance = self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
@@ -469,9 +481,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# deleted before it actually got created. This should
# be fixed once we have no-db-messaging
pass
- except:
- with utils.original_exception_raised():
- _deallocate_network()
+ except Exception:
+ _cleanup()
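A sketch, not part of the diff, of the control flow _cleanup() gets from
utils.save_and_reraise_exception(): the cleanup body runs inside the
except block and the original exception is re-raised afterwards (if the
cleanup itself raises, the original is logged before the new error
propagates):

    from nova import utils

    def run_with_cleanup(run, cleanup):
        # Hypothetical helper mirroring _run_instance's error path.
        try:
            run()
        except Exception:
            with utils.save_and_reraise_exception():
                cleanup()
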
def _get_instance_volume_bdms(self, context, instance_id):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
@@ -652,7 +663,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
- self.driver.spawn(context, instance_ref, network_info, bd_mapping)
+ image_meta = _get_image_meta(context, instance_ref['image_ref'])
+
+ self.driver.spawn(context, instance_ref, image_meta,
+ network_info, bd_mapping)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
@@ -680,8 +694,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
instance_id,
power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING)
+ vm_state=vm_states.ACTIVE)
if instance_ref['power_state'] != power_state.RUNNING:
state = instance_ref['power_state']
@@ -825,18 +838,25 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_state = power_state.RUNNING
if instance_state != expected_state:
+ self._instance_update(context, instance_id, task_state=None)
raise exception.Error(_('Instance is not running'))
else:
try:
self.driver.set_admin_password(instance_ref, new_pass)
LOG.audit(_("Instance %s: Root password set"),
instance_ref["name"])
+ self._instance_update(context,
+ instance_id,
+ task_state=None)
break
except NotImplementedError:
# NOTE(dprince): if the driver doesn't implement
# set_admin_password we break to avoid a loop
LOG.warn(_('set_admin_password is not implemented '
- 'by this driver.'))
+ 'by this driver.'))
+ self._instance_update(context,
+ instance_id,
+ task_state=None)
break
except Exception, e:
# Catch all here because this could be anything.
@@ -845,6 +865,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# At some point this exception may make it back
# to the API caller, and we don't want to reveal
# too much. The real exception is logged above
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
raise exception.Error(_('Internal error'))
time.sleep(1)
continue
@@ -1070,8 +1093,16 @@ class ComputeManager(manager.SchedulerDependentManager):
migration_id,
{'status': 'migrating'})
- disk_info = self.driver.migrate_disk_and_power_off(
- context, instance_ref, migration_ref['dest_host'])
+ try:
+ disk_info = self.driver.migrate_disk_and_power_off(
+ context, instance_ref, migration_ref['dest_host'])
+ except exception.MigrationError, error:
+ LOG.error(_('%s. Setting instance vm_state to ERROR') % (error,))
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
+ return
+
self.db.migration_update(context,
migration_id,
{'status': 'post-migrating'})
@@ -1117,12 +1148,18 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref.uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
+
+ # Have to look up image here since we depend on disk_format later
+ image_meta = _get_image_meta(context, instance_ref['image_ref'])
+
self.driver.finish_migration(context, migration_ref, instance_ref,
- disk_info, network_info, resize_instance)
+ disk_info, network_info, image_meta,
+ resize_instance)
self._instance_update(context,
instance_id,
vm_state=vm_states.ACTIVE,
+ host=migration_ref['dest_compute'],
task_state=task_states.RESIZE_VERIFY)
self.db.migration_update(context, migration_id,
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index b52140bf8..c6016b509 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -46,6 +46,7 @@ RESIZE_VERIFY = 'resize_verify'
REBUILDING = 'rebuilding'
REBOOTING = 'rebooting'
+REBOOTING_HARD = 'rebooting_hard'
PAUSING = 'pausing'
UNPAUSING = 'unpausing'
SUSPENDING = 'suspending'
diff --git a/nova/context.py b/nova/context.py
index de5b791c4..36d15ba08 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -20,6 +21,7 @@
import uuid
+from nova import local
from nova import utils
@@ -51,6 +53,7 @@ class RequestContext(object):
self.request_id = request_id
self.auth_token = auth_token
self.strategy = strategy
+ local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
diff --git a/nova/crypto.py b/nova/crypto.py
index 71bef80f2..e97852572 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -52,7 +52,7 @@ flags.DEFINE_string('key_file',
os.path.join('private', 'cakey.pem'),
_('Filename of private key'))
flags.DEFINE_string('crl_file', 'crl.pem',
- _('Filename of root Certificate Revokation List'))
+ _('Filename of root Certificate Revocation List'))
flags.DEFINE_string('keys_path', '$state_path/keys',
_('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
diff --git a/nova/db/api.py b/nova/db/api.py
index 7af81a1a2..4af79b13c 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -20,6 +20,15 @@
The underlying driver is loaded as a :class:`LazyPluggable`.
+Functions in this module are imported into the nova.db namespace. Call these
+functions from nova.db namespace, not the nova.db.api namespace.
+
+All functions in this module return objects that implement a dictionary-like
+interface. Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface. However, a future goal is to have all of
+these objects be simple dictionaries.
+
+
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
@@ -1616,3 +1625,92 @@ def s3_image_get_by_uuid(context, image_uuid):
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid"""
return IMPL.s3_image_create(context, image_uuid)
+
+
+####################
+
+
+def sm_backend_conf_create(context, values):
+ """Create a new SM Backend Config entry."""
+ return IMPL.sm_backend_conf_create(context, values)
+
+
+def sm_backend_conf_update(context, sm_backend_conf_id, values):
+ """Update a SM Backend Config entry."""
+ return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
+
+
+def sm_backend_conf_delete(context, sm_backend_conf_id):
+ """Delete a SM Backend Config."""
+ return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
+
+
+def sm_backend_conf_get(context, sm_backend_conf_id):
+ """Get a specific SM Backend Config."""
+ return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
+
+
+def sm_backend_conf_get_by_sr(context, sr_uuid):
+ """Get a specific SM Backend Config."""
+ return IMPL.sm_backend_conf_get(context, sr_uuid)
+
+
+def sm_backend_conf_get_all(context):
+ """Get all SM Backend Configs."""
+ return IMPL.sm_backend_conf_get_all(context)
+
+
+####################
+
+
+def sm_flavor_create(context, values):
+ """Create a new SM Flavor entry."""
+ return IMPL.sm_flavor_create(context, values)
+
+
+def sm_flavor_update(context, sm_flavor_id, values):
+ """Update a SM Flavor entry."""
+ return IMPL.sm_flavor_update(context, sm_flavor_id, values)
+
+
+def sm_flavor_delete(context, sm_flavor_id):
+ """Delete a SM Flavor."""
+ return IMPL.sm_flavor_delete(context, sm_flavor_id)
+
+
+def sm_flavor_get(context, sm_flavor):
+ """Get a specific SM Flavor."""
+ return IMPL.sm_flavor_get(context, sm_flavor)
+
+
+def sm_flavor_get_all(context):
+ """Get all SM Flavors."""
+ return IMPL.sm_flavor_get_all(context)
+
+
+####################
+
+
+def sm_volume_create(context, values):
+ """Create a new child Zone entry."""
+ return IMPL.sm_volume_create(context, values)
+
+
+def sm_volume_update(context, volume_id, values):
+ """Update a child Zone entry."""
+ return IMPL.sm_volume_update(context, values)
+
+
+def sm_volume_delete(context, volume_id):
+ """Delete a child Zone."""
+ return IMPL.sm_volume_delete(context, volume_id)
+
+
+def sm_volume_get(context, volume_id):
+ """Get a specific child Zone."""
+ return IMPL.sm_volume_get(context, volume_id)
+
+
+def sm_volume_get_all(context):
+ """Get all child Zones."""
+ return IMPL.sm_volume_get_all(context)
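A hedged usage sketch of the new sm_* helpers; the label, uuid, and
config string are invented for illustration:

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    flavor = db.sm_flavor_create(ctxt, {'label': 'gold',
                                        'description': 'example flavor'})
    backend = db.sm_backend_conf_create(
        ctxt, {'flavor_id': flavor['id'],
               'sr_uuid': '11111111-2222-3333-4444-555555555555',
               'sr_type': 'nfs',
               'config_params': 'server=192.0.2.1,serverpath=/sr'})
    print db.sm_backend_conf_get(ctxt, backend['id'])['sr_type']
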
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 4cd9c2eb1..77d018d43 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -124,7 +124,7 @@ def require_context(f):
def require_instance_exists(f):
"""Decorator to require the specified instance to exist.
- Requres the wrapped function to use context and instance_id as
+ Requires the wrapped function to use context and instance_id as
their first two arguments.
"""
@@ -138,7 +138,7 @@ def require_instance_exists(f):
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
- Requres the wrapped function to use context and volume_id as
+ Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
@@ -947,7 +947,7 @@ def fixed_ip_update(context, address, values):
@require_context
def virtual_interface_create(context, values):
- """Create a new virtual interface record in teh database.
+ """Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
@@ -1043,7 +1043,7 @@ def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual interfaces for instance.
- :param instance_id: = id of the instance to retreive vifs for
+ :param instance_id: = id of the instance to retrieve vifs for
"""
session = get_session()
vif_refs = session.query(models.VirtualInterface).\
@@ -1072,7 +1072,7 @@ def virtual_interface_get_by_instance_and_network(context, instance_id,
def virtual_interface_get_by_network(context, network_id):
"""Gets all virtual_interface on network.
- :param network_id: = network to retreive vifs for
+ :param network_id: = network to retrieve vifs for
"""
session = get_session()
vif_refs = session.query(models.VirtualInterface).\
@@ -1085,7 +1085,7 @@ def virtual_interface_get_by_network(context, network_id):
@require_context
def virtual_interface_delete(context, vif_id):
- """Delete virtual interface record from teh database.
+ """Delete virtual interface record from the database.
:param vif_id: = id of vif to delete
"""
@@ -1724,7 +1724,7 @@ def network_associate(context, project_id, force=False):
or if force is True
force solves race condition where a fresh project has multiple instance
- builds simultaneosly picked up by multiple network hosts which attempt
+ builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
@@ -1744,7 +1744,7 @@ def network_associate(context, project_id, force=False):
network_ref = network_query(project_id)
if force or not network_ref:
- # in force mode or project doesn't have a network so assocaite
+ # in force mode or project doesn't have a network so associate
# with a new network
# get new network
@@ -1882,8 +1882,8 @@ def network_get_all_by_uuids(context, network_uuids, project_id=None):
#check if host is set to all of the networks
# returned in the result
for network in result:
- if network['host'] is None:
- raise exception.NetworkHostNotSet(network_id=network['id'])
+ if network['host'] is None:
+ raise exception.NetworkHostNotSet(network_id=network['id'])
#check if the result contains all the networks
#we are looking for
@@ -4067,3 +4067,152 @@ def s3_image_create(context, image_uuid):
raise exception.DBError(e)
return s3_image_ref
+
+
+####################
+
+
+@require_admin_context
+def sm_backend_conf_create(context, values):
+ backend_conf = models.SMBackendConf()
+ backend_conf.update(values)
+ backend_conf.save()
+ return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_update(context, sm_backend_id, values):
+ session = get_session()
+ backend_conf = session.query(models.SMBackendConf).\
+ filter_by(id=sm_backend_id).first()
+ if not backend_conf:
+ raise exception.NotFound(_("No backend config with id "\
+ "%(sm_backend_id)s") % locals())
+ backend_conf.update(values)
+ backend_conf.save(session=session)
+ return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_delete(context, sm_backend_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.SMBackendConf).\
+ filter_by(id=sm_backend_id).\
+ delete()
+
+
+@require_admin_context
+def sm_backend_conf_get(context, sm_backend_id):
+ session = get_session()
+ result = session.query(models.SMBackendConf).\
+ filter_by(id=sm_backend_id).first()
+ if not result:
+ raise exception.NotFound(_("No backend config with id "\
+ "%(sm_backend_id)s") % locals())
+ return result
+
+
+@require_admin_context
+def sm_backend_conf_get_by_sr(context, sr_uuid):
+ session = get_session()
+ result = session.query(models.SMBackendConf).\
+ filter_by(sr_uuid=sr_uuid).first()
+ return result
+
+
+@require_admin_context
+def sm_backend_conf_get_all(context):
+ session = get_session()
+ return session.query(models.SMBackendConf).all()
+
+
+####################
+
+
+@require_admin_context
+def sm_flavor_create(context, values):
+ sm_flavor = models.SMFlavors()
+ sm_flavor.update(values)
+ sm_flavor.save()
+ return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_update(context, sm_flavor_label, values):
+ session = get_session()
+ sm_flavor = session.query(models.SMFlavors).\
+ filter_by(label=sm_flavor_label).first()
+ if not sm_flavor:
+ raise exception.NotFound(_("No sm_flavor called "\
+ "%(sm_flavor_label)s") % locals())
+ sm_flavor.update(values)
+ sm_flavor.save(session=session)
+ return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_delete(context, sm_flavor_label):
+ session = get_session()
+ with session.begin():
+ session.query(models.SMFlavors).\
+ filter_by(label=sm_flavor_label).\
+ delete()
+
+
+@require_admin_context
+def sm_flavor_get(context, sm_flavor):
+ session = get_session()
+ result = session.query(models.SMFlavors).\
+ filter_by(label=sm_flavor).first()
+ if not result:
+ raise exception.NotFound(_("No sm_flavor called %(sm_flavor)s") \
+ % locals())
+ return result
+
+
+@require_admin_context
+def sm_flavor_get_all(context):
+ session = get_session()
+ return session.query(models.SMFlavors).all()
+
+
+###############################
+
+
+def sm_volume_create(context, values):
+ sm_volume = models.SMVolume()
+ sm_volume.update(values)
+ sm_volume.save()
+ return sm_volume
+
+
+def sm_volume_update(context, volume_id, values):
+ session = get_session()
+ sm_volume = session.query(models.SMVolume).filter_by(id=volume_id).first()
+ if not sm_volume:
+ raise exception.NotFound(_("No sm_volume with id %(volume_id)s") \
+ % locals())
+ sm_volume.update(values)
+ sm_volume.save()
+ return sm_volume
+
+
+def sm_volume_delete(context, volume_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.SMVolume).\
+ filter_by(id=volume_id).\
+ delete()
+
+
+def sm_volume_get(context, volume_id):
+ session = get_session()
+ result = session.query(models.SMVolume).filter_by(id=volume_id).first()
+ if not result:
+ raise exception.NotFound(_("No sm_volume with id %(volume_id)s") \
+ % locals())
+ return result
+
+
+def sm_volume_get_all(context):
+ session = get_session()
+ return session.query(models.SMVolume).all()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py
new file mode 100644
index 000000000..2c1d497c1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+flavors = Table('sm_flavors', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('label',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+backend = Table('sm_backend_config', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
+ nullable=False),
+ Column('sr_uuid',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('sr_type',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('config_params',
+ String(length=2047,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ )
+
+sm_vol = Table('sm_volume', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), ForeignKey('volumes.id'),
+ primary_key=True, nullable=False),
+ Column('backend_id', Integer(), ForeignKey('sm_backend_config.id'),
+ nullable=False),
+ Column('vdi_uuid',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+#
+# Tables to alter
+#
+
+# (none currently)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (flavors, backend, sm_vol):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
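The new migration script ships no downgrade; a plausible counterpart,
mirroring other migrations in this repository (an assumption, not part
of the patch), would drop the tables in reverse dependency order:

    def downgrade(migrate_engine):
        meta.bind = migrate_engine
        # sm_volume references sm_backend_config, which references
        # sm_flavors, so drop in reverse order of creation.
        for table in (sm_vol, backend, flavors):
            table.drop()
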
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 56f2b4aad..2aba02388 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -880,6 +880,32 @@ class S3Image(BASE, NovaBase):
uuid = Column(String(36), nullable=False)
+class SMFlavors(BASE, NovaBase):
+ """Represents a flavor for SM volumes."""
+ __tablename__ = 'sm_flavors'
+ id = Column(Integer(), primary_key=True)
+ label = Column(String(255))
+ description = Column(String(255))
+
+
+class SMBackendConf(BASE, NovaBase):
+ """Represents the connection to the backend for SM."""
+ __tablename__ = 'sm_backend_config'
+ id = Column(Integer(), primary_key=True)
+ flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
+ sr_uuid = Column(String(255))
+ sr_type = Column(String(255))
+ config_params = Column(String(2047))
+
+
+class SMVolume(BASE, NovaBase):
+ """Represents a volume managed by the SM driver."""
+ __tablename__ = 'sm_volume'
+ id = Column(Integer(), ForeignKey(Volume.id), primary_key=True)
+ backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
+ nullable=False)
+ vdi_uuid = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -895,7 +921,7 @@ def register_models():
Project, Certificate, ConsolePool, Console, Zone,
VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs,
AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration,
- VirtualStorageArray)
+ VirtualStorageArray, SMFlavors, SMBackendConf, SMVolume)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 998fece1e..d749d89a0 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -107,6 +107,8 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None,
# TODO(sandy): Find a way to import nova.notifier.api so we don't have
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
+ # TODO(johannes): Also, it would be nice to use
+ # utils.save_and_reraise_exception() without an import loop
def inner(f):
def wrapped(*args, **kw):
try:
@@ -206,6 +208,10 @@ class Invalid(NovaException):
message = _("Unacceptable parameters.")
+class InvalidRequest(Invalid):
+ message = _("The request is invalid.")
+
+
class InvalidSignature(Invalid):
message = _("Invalid signature %(signature)s for user %(user)s.")
@@ -567,7 +573,7 @@ class NoFloatingIpsDefined(NotFound):
class KeypairNotFound(NotFound):
- message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
+ message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
@@ -857,3 +863,16 @@ class InsufficientFreeMemory(NovaException):
class CouldNotFetchMetrics(NovaException):
message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
+
+
+class NoValidHost(NovaException):
+ message = _("No valid host was found. %(reason)s")
+
+
+class WillNotSchedule(NovaException):
+ message = _("Host %(host)s is not up or doesn't exist.")
+
+
+class QuotaError(ApiError):
+ """Quota Exceeded."""
+ pass
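A small sketch of the relocated QuotaError (assuming ApiError accepts a
plain message string), matching its new use in nova/network/manager.py
further down:

    from nova import exception

    def allocate_or_fail(addresses_left):
        # Hypothetical helper mirroring FloatingIP.allocate_floating_ip.
        if addresses_left <= 0:
            raise exception.QuotaError('Address quota exceeded. You '
                                       'cannot allocate any more '
                                       'addresses')
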
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index e7e9dab77..2807a76f6 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
+"""Based a bit on the carrot.backends.queue backend... but a lot better."""
import Queue as queue
diff --git a/nova/flags.py b/nova/flags.py
index 20225eba5..bc4fd475d 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -474,3 +474,5 @@ DEFINE_integer('reclaim_instance_interval', 0,
DEFINE_integer('zombie_instance_updated_at_window', 172800,
'Limit in seconds that a zombie instance can exist before '
'being cleaned up.')
+
+DEFINE_boolean('allow_ec2_admin_api', False, 'Enable/Disable EC2 Admin API')
diff --git a/nova/image/fake.py b/nova/image/fake.py
index a51a8ecb4..43e889ad4 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -118,6 +118,7 @@ class _FakeImageService(object):
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
+ self._imagedata = {}
super(_FakeImageService, self).__init__()
def index(self, context, filters=None, marker=None, limit=None):
@@ -132,6 +133,11 @@ class _FakeImageService(object):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
+ def get(self, context, image_id, data):
+ metadata = self.show(context, image_id)
+ data.write(self._imagedata.get(image_id, ''))
+ return metadata
+
def show(self, context, image_id):
"""Get data about specified image.
@@ -164,6 +170,8 @@ class _FakeImageService(object):
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
+ if data:
+ self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None):
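A hedged round trip through the fake image service's new data path,
assuming create(context, metadata, data=None) keys images by
metadata['id'] and that the module's _FakeImageService can be
instantiated directly:

    import StringIO

    from nova.image import fake

    service = fake._FakeImageService()
    service.create(None, {'id': '99', 'name': 'demo'},
                   data=StringIO.StringIO('raw-bytes'))
    buf = StringIO.StringIO()
    service.get(None, '99', buf)
    assert buf.getvalue() == 'raw-bytes'
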
diff --git a/nova/image/glance.py b/nova/image/glance.py
index c9ce946b8..9eea8eb4c 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -396,7 +396,7 @@ def _parse_glance_iso8601_timestamp(timestamp):
pass
raise ValueError(_('%(timestamp)s does not follow any of the '
- 'signatures: %(ISO_FORMATS)s') % locals())
+ 'signatures: %(iso_formats)s') % locals())
# TODO(yamahata): use block-device-mapping extension to glance
diff --git a/builddeb.sh b/nova/local.py
index e354dc241..19d962732 100755..100644
--- a/builddeb.sh
+++ b/nova/local.py
@@ -1,7 +1,6 @@
-#!/bin/sh
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,5 +15,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-dpkg-buildpackage -b -rfakeroot -tc -uc -D
+"""Greenthread local storage of variables using weak references"""
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+ def __getattribute__(self, attr):
+ rval = corolocal.local.__getattribute__(self, attr)
+ if rval:
+ rval = rval()
+ return rval
+
+ def __setattr__(self, attr, value):
+ value = weakref.ref(value)
+ return corolocal.local.__setattr__(self, attr, value)
+
+
+store = WeakLocal()
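A minimal sketch of the weak-reference semantics, runnable against this
module: the store never keeps its values alive on its own:

    import gc

    from nova import local

    class _Ctx(object):
        """Hypothetical stand-in for a RequestContext."""
        pass

    ctx = _Ctx()
    local.store.context = ctx           # stored as weakref.ref(ctx)
    assert local.store.context is ctx   # reads dereference the weakref

    del ctx                             # drop the last strong reference
    gc.collect()
    assert local.store.context is None  # referent gone, store yields None
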
diff --git a/nova/log.py b/nova/log.py
index 1e04f755d..2121933ab 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -38,6 +39,7 @@ import traceback
import nova
from nova import flags
+from nova import local
from nova import version
@@ -62,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
['amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
+ 'suds=INFO',
'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
@@ -129,7 +132,7 @@ def _get_log_file_path(binary=None):
class NovaLogger(logging.Logger):
"""NovaLogger manages request context and formatting.
- This becomes the class that is instanciated by logging.getLogger.
+ This becomes the class that is instantiated by logging.getLogger.
"""
@@ -152,6 +155,8 @@ class NovaLogger(logging.Logger):
"""Extract context from any log call."""
if not extra:
extra = {}
+ if context is None:
+ context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
extra.update({"nova_version": version.version_string_with_vcs()})
@@ -245,11 +250,12 @@ class NovaRootLogger(NovaLogger):
def setup_from_flags(self):
"""Setup logger from flags."""
global _filelog
+ if self.syslog:
+ self.removeHandler(self.syslog)
+ self.syslog = None
if FLAGS.use_syslog:
self.syslog = SysLogHandler(address='/dev/log')
self.addHandler(self.syslog)
- elif self.syslog:
- self.removeHandler(self.syslog)
logpath = _get_log_file_path()
if logpath:
self.removeHandler(self.streamlog)
@@ -318,7 +324,7 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
- """Shortcut for logging to root log with sevrity 'AUDIT'."""
+ """Shortcut for logging to root log with severity 'AUDIT'."""
logging.root.log(AUDIT, msg, *args, **kwargs)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index fdd01b2fc..0cda55b32 100755
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -218,14 +218,14 @@ class IptablesManager(object):
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
- For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
- wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
- wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
+ For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
+ are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
+ the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
- For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
- wrapped in the same was as the builtin filter chains. Additionally, there's
- a snat chain that is applied after the POSTROUTING chain.
+ For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
+ wrapped in the same way as the built-in filter chains. Additionally,
+ there's a snat chain that is applied after the POSTROUTING chain.
"""
@@ -253,7 +253,7 @@ class IptablesManager(object):
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
- # Wrap the builtin chains
+ # Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
@@ -283,10 +283,10 @@ class IptablesManager(object):
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
- # And then we add a floating-snat chain and jump to first thing in
+ # And then we add a float-snat chain and jump to first thing in
# the snat chain.
- self.ipv4['nat'].add_chain('floating-snat')
- self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
+ self.ipv4['nat'].add_chain('float-snat')
+ self.ipv4['nat'].add_rule('snat', '-j $float-snat')
@utils.synchronized('iptables', external=True)
def apply(self):
@@ -364,7 +364,7 @@ class IptablesManager(object):
return True
# We filter duplicates, letting the *last* occurrence take
- # precendence.
+ # precedence.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter.reverse()
@@ -470,7 +470,7 @@ def remove_floating_forward(floating_ip, fixed_ip):
def floating_forward_rules(floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
- ('floating-snat',
+ ('float-snat',
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
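For a concrete pair of addresses (invented), the rules generated above
now land in the renamed float-snat chain:

    >>> floating_forward_rules('203.0.113.5', '10.0.0.2')
    [('PREROUTING', '-d 203.0.113.5 -j DNAT --to 10.0.0.2'),
     ('OUTPUT', '-d 203.0.113.5 -j DNAT --to 10.0.0.2'),
     ('float-snat', '-s 10.0.0.2 -j SNAT --to 203.0.113.5')]
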
@@ -595,11 +595,29 @@ def release_dhcp(dev, address, mac_address):
utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
+def update_dhcp(context, dev, network_ref):
+ conffile = _dhcp_file(dev, 'conf')
+ with open(conffile, 'w') as f:
+ f.write(get_dhcp_hosts(context, network_ref))
+ restart_dhcp(dev, network_ref)
+
+
+def update_dhcp_hostfile_with_text(dev, hosts_text):
+ conffile = _dhcp_file(dev, 'conf')
+ with open(conffile, 'w') as f:
+ f.write(hosts_text)
+
+
+def kill_dhcp(dev):
+ pid = _dnsmasq_pid_for(dev)
+ _execute('kill', '-9', pid, run_as_root=True)
+
+
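A hedged sketch of the resulting split between hostfile maintenance and
daemon lifecycle; the device name and hosts line are invented, and both
calls shell out, so this is illustrative rather than something to run
casually:

    from nova.network import linux_net

    # Rewrite the dnsmasq hostfile without touching the daemon.
    linux_net.update_dhcp_hostfile_with_text(
        'br100', '02:16:3e:00:00:01,host-10-0-0-2,10.0.0.2\n')

    # Tear down the dnsmasq process for a device that is going away.
    linux_net.kill_dhcp('br100')
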
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
# configuration options (like dchp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
-def update_dhcp(context, dev, network_ref):
+def restart_dhcp(dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
@@ -607,8 +625,6 @@ def update_dhcp(context, dev, network_ref):
"""
conffile = _dhcp_file(dev, 'conf')
- with open(conffile, 'w') as f:
- f.write(get_dhcp_hosts(context, network_ref))
if FLAGS.use_single_default_gateway:
optsfile = _dhcp_file(dev, 'opts')
@@ -625,7 +641,9 @@ def update_dhcp(context, dev, network_ref):
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
- if conffile in out:
+ # Using symlinks can cause problems here so just compare the name
+ # of the file itself
+ if conffile.split("/")[-1] in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
return
@@ -830,8 +848,8 @@ def _ip_bridge_cmd(action, params, device):
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
-def plug(network, mac_address):
- return interface_driver.plug(network, mac_address)
+def plug(network, mac_address, gateway=True):
+ return interface_driver.plug(network, mac_address, gateway)
def unplug(network):
@@ -862,7 +880,7 @@ class LinuxNetInterfaceDriver(object):
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
- def plug(self, network, mac_address):
+ def plug(self, network, mac_address, gateway=True):
if network.get('vlan', None) is not None:
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
@@ -874,7 +892,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
network['bridge_interface'],
- network)
+ network, gateway)
return network['bridge']
@@ -900,10 +918,9 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
LOG.debug(_('Starting VLAN interface %s'), interface)
- _execute('vconfig', 'set_name_type',
- 'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
- _execute('vconfig', 'add', bridge_interface,
- vlan_num, run_as_root=True)
+ _execute('ip', 'link', 'add', 'link', bridge_interface,
+ 'name', interface, 'type', 'vlan',
+ 'id', vlan_num, run_as_root=True)
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
@@ -914,7 +931,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@classmethod
@utils.synchronized('ensure_bridge', external=True)
- def ensure_bridge(_self, bridge, interface, net_attrs=None):
+ def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
@@ -974,19 +991,28 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
"can't enslave it to bridge %s.\n" % (interface, bridge)):
raise exception.Error('Failed to add interface: %s' % err)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ # Don't forward traffic unless we were told to be a gateway
+ if gateway:
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % \
bridge)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % \
bridge)
+ else:
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--in-interface %s -j DROP' % \
+ bridge)
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--out-interface %s -j DROP' % \
+ bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
- def plug(self, network, mac_address):
- dev = "gw-" + str(network['id'])
+ def plug(self, network, mac_address, gateway=True):
+ dev = "gw-" + str(network['uuid'][0:11])
if not _device_exists(dev):
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
@@ -1002,6 +1028,14 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
_execute('ip', 'link', 'set', dev, "address", mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
+ if not gateway:
+ # If we weren't instructed to act as a gateway then add the
+ # appropriate flows to block all non-dhcp traffic.
+ _execute('ovs-ofctl',
+ 'add-flow', bridge, "priority=1,actions=drop")
+ _execute('ovs-ofctl', 'add-flow', bridge,
+ "udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal" %
+ mac_address)
return dev
@@ -1009,7 +1043,7 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
return self.get_dev(network)
def get_dev(self, network):
- dev = "gw-" + str(network['id'])
+ dev = "gw-" + str(network['uuid'][0:11])
return dev
iptables_manager = IptablesManager()
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 79bb0eda1..96d6dee00 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -95,6 +95,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
+flags.DEFINE_string('gateway', None, 'Default IPv4 gateway')
flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
@@ -292,7 +293,7 @@ class FloatingIP(object):
LOG.warn(_('Quota exceeded for %s, tried to allocate '
'address'),
context.project_id)
- raise quota.QuotaError(_('Address quota exceeded. You cannot '
+ raise exception.QuotaError(_('Address quota exceeded. You cannot '
'allocate any more addresses'))
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
@@ -491,6 +492,10 @@ class NetworkManager(manager.SchedulerDependentManager):
network_id,
host=host)
+ def get_dhcp_leases(self, ctxt, network_ref):
+ """Broker the request to the driver to fetch the dhcp leases"""
+ return self.driver.get_dhcp_leases(ctxt, network_ref)
+
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
@@ -512,7 +517,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host,
time)
if num:
- LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num)
+ LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_ref):
"""Safely sets the host of the network."""
@@ -651,7 +656,7 @@ class NetworkManager(manager.SchedulerDependentManager):
instance_type_id, host):
"""Creates network info list for instance.
- called by allocate_for_instance and netowrk_api
+ called by allocate_for_instance and network_api
context needs to be elevated
:returns: network info list [(network,info),(network,info)...]
where network = dict containing pertinent data from a network db object
@@ -863,7 +868,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self._setup_network(context, network_ref)
def create_networks(self, context, label, cidr, multi_host, num_networks,
- network_size, cidr_v6, gateway_v6, bridge,
+ network_size, cidr_v6, gateway, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
@@ -947,7 +952,7 @@ class NetworkManager(manager.SchedulerDependentManager):
if cidr and subnet_v4:
net['cidr'] = str(subnet_v4)
net['netmask'] = str(subnet_v4.netmask)
- net['gateway'] = str(subnet_v4[1])
+ net['gateway'] = gateway or str(subnet_v4[1])
net['broadcast'] = str(subnet_v4.broadcast)
net['dhcp_start'] = str(subnet_v4[2])
@@ -971,7 +976,7 @@ class NetworkManager(manager.SchedulerDependentManager):
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
- # NOTE(vish): This makes ports unique accross the cloud, a more
+ # NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them uniq per ip
net['vpn_public_port'] = kwargs['vpn_start'] + index
@@ -987,9 +992,14 @@ class NetworkManager(manager.SchedulerDependentManager):
self._create_fixed_ips(context, network['id'])
return networks
- def delete_network(self, context, fixed_range, require_disassociated=True):
+ def delete_network(self, context, fixed_range, uuid,
+ require_disassociated=True):
- network = db.network_get_by_cidr(context, fixed_range)
+ # Prefer uuid but we'll also take cidr for backwards compatibility
+ if uuid:
+ network = db.network_get_by_uuid(context.elevated(), uuid)
+ elif fixed_range:
+ network = db.network_get_by_cidr(context.elevated(), fixed_range)
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
@@ -1018,7 +1028,7 @@ class NetworkManager(manager.SchedulerDependentManager):
ips = []
for index in range(num_ips):
address = str(project_net[index])
- if index < bottom_reserved or num_ips - index < top_reserved:
+ if index < bottom_reserved or num_ips - index <= top_reserved:
reserved = True
else:
reserved = False
@@ -1207,6 +1217,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
self.db.fixed_ip_associate(context,
address,
instance_id,
+ network['id'],
reserved=True)
else:
address = kwargs.get('address', None)
diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py
index 40c68dfdc..d3833257b 100644
--- a/nova/network/quantum/client.py
+++ b/nova/network/quantum/client.py
@@ -224,8 +224,6 @@ class Client(object):
type(data)))
def deserialize(self, data, status_code):
- if status_code == 202:
- return data
return JSONSerializer().deserialize(data, self.content_type())
def content_type(self, format=None):
diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py
index 404578474..3cf131ddb 100644
--- a/nova/network/quantum/manager.py
+++ b/nova/network/quantum/manager.py
@@ -15,6 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
+from netaddr import IPNetwork, IPAddress
+
from nova import db
from nova import exception
from nova import flags
@@ -22,6 +26,7 @@ from nova import log as logging
from nova import manager
from nova.network import manager
from nova.network.quantum import quantum_connection
+from nova.network.quantum import melange_ipam_lib
from nova import utils
LOG = logging.getLogger("nova.network.quantum.manager")
@@ -31,6 +36,16 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('quantum_ipam_lib',
'nova.network.quantum.nova_ipam_lib',
"Indicates underlying IP address management library")
+# TODO(Vek): Eventually, this needs to mean more than just using
+# Melange for assignment of MAC addresses (with an
+# appropriate flag name change, of course), but this is all
+# it does right now
+flags.DEFINE_bool('use_melange_mac_generation', False,
+ "Use Melange for assignment of MAC addresses")
+
+
+flags.DEFINE_bool('quantum_use_dhcp', False,
+ 'Whether or not to enable DHCP for networks')
class QuantumManager(manager.FlatManager):
@@ -43,7 +58,6 @@ class QuantumManager(manager.FlatManager):
Currently, the QuantumManager does NOT support any of the 'gateway'
functionality implemented by the Nova VlanManager, including:
* floating IPs
- * DHCP
* NAT gateway
Support for these capabilities is targeted for future releases.
@@ -65,9 +79,14 @@ class QuantumManager(manager.FlatManager):
self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
super(QuantumManager, self).__init__(*args, **kwargs)
+ self.driver.init_host()
+ # TODO(bgh): We'll need to enable these when we implement the full L3
+ # functionalities
+ # self.driver.ensure_metadata_ip()
+ # self.driver.metadata_forward()
def create_networks(self, context, label, cidr, multi_host, num_networks,
- network_size, cidr_v6, gateway_v6, bridge,
+ network_size, cidr_v6, gateway, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, uuid=None,
**kwargs):
"""Unlike other NetworkManagers, with QuantumManager, each
@@ -98,17 +117,36 @@ class QuantumManager(manager.FlatManager):
ipam_tenant_id = kwargs.get("project_id", None)
priority = kwargs.get("priority", 0)
self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id,
- priority, cidr, gateway_v6, cidr_v6, dns1, dns2)
+ priority, cidr, gateway, gateway_v6,
+ cidr_v6, dns1, dns2)
+
+ return [{'uuid': quantum_net_id}]
- def delete_network(self, context, fixed_range):
- """Lookup network by IPv4 cidr, delete both the IPAM
+ def delete_network(self, context, fixed_range, uuid):
+ """Lookup network by uuid, delete both the IPAM
subnet and the corresponding Quantum network.
+
+ The fixed_range parameter is kept here for interface compatibility
+ but is not used.
"""
+ quantum_net_id = uuid
project_id = context.project_id
- quantum_net_id = self.ipam.get_network_id_by_cidr(
- context, fixed_range, project_id)
+ # TODO(bgh): The project_id isn't getting populated here for some
+ # reason.. I'm not sure if it's an invalid assumption or just a bug.
+ # In order to get the right quantum_net_id we'll have to query all the
+ # project_ids for now.
+ if project_id is None:
+ projects = db.project_get_all(context)
+ for p in projects:
+ if self.q_conn.network_exists(p['id'], uuid):
+ project_id = p['id']
+ break
+ if project_id is None:
+ # If nothing was found we default to this
+ project_id = FLAGS.quantum_default_tenant_id
+ LOG.debug("Deleting network for tenant: %s" % project_id)
self.ipam.delete_subnets_by_net_id(context, quantum_net_id,
- project_id)
+ project_id)
q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
self.q_conn.delete_network(q_tenant_id, quantum_net_id)
@@ -139,7 +177,7 @@ class QuantumManager(manager.FlatManager):
instance_type_id = kwargs['instance_type_id']
host = kwargs.pop('host')
project_id = kwargs.pop('project_id')
- LOG.debug(_("network allocations for instance %s"), instance_id)
+ LOG.debug(_("network allocations for instance %s"), project_id)
requested_networks = kwargs.get('requested_networks')
@@ -150,9 +188,17 @@ class QuantumManager(manager.FlatManager):
net_proj_pairs = self.ipam.get_project_and_global_net_ids(context,
project_id)
+ # Quantum may also know about networks that aren't in the networks
+ # table so we need to query Quantum for any tenant networks and add
+ # them to net_proj_pairs.
+ qnets = self.q_conn.get_networks(project_id)
+ for qn in qnets['networks']:
+ pair = (qn['id'], project_id)
+ if pair not in net_proj_pairs:
+ net_proj_pairs.append(pair)
+
# Create a port via quantum and attach the vif
for (quantum_net_id, project_id) in net_proj_pairs:
-
# FIXME(danwent): We'd like to have the manager be
# completely decoupled from the nova networks table.
# However, other parts of nova sometimes go behind our
@@ -163,22 +209,129 @@ class QuantumManager(manager.FlatManager):
# solution, but this would require significant work
# elsewhere.
admin_context = context.elevated()
+
+ # We may not be able to get a network_ref here if this network
+ # isn't in the database (i.e. it came from Quantum).
network_ref = db.network_get_by_uuid(admin_context,
quantum_net_id)
-
- vif_rec = manager.FlatManager.add_virtual_interface(self,
- context, instance_id, network_ref['id'])
+ if network_ref is None:
+ network_ref = {}
+ network_ref = {"uuid": quantum_net_id,
+ "project_id": project_id,
+ # NOTE(bgh): We need to document this somewhere but since
+ # we don't know the priority of any networks we get from
+ # quantum, we just give them a priority of 0. If it's
+ # necessary to specify the order of the vifs and what
+ # network they map to then the user will have to use the
+ # OSCreateServer extension and specify them explicitly.
+ #
+ # In the future users will be able to tag quantum networks
+ # with a priority .. and at that point we can update the
+ # code here to reflect that.
+ "priority": 0,
+ "id": 'NULL',
+ "label": "quantum-net-%s" % quantum_net_id}
+
+ vif_rec = self.add_virtual_interface(context,
+ instance_id,
+ network_ref['id'])
# talk to Quantum API to create and attach port.
q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
vif_rec['uuid'])
- self.ipam.allocate_fixed_ip(context, project_id, quantum_net_id,
- vif_rec)
-
+ # Tell melange to allocate an IP
+ ip = self.ipam.allocate_fixed_ip(context, project_id,
+ quantum_net_id, vif_rec)
+ # Set up/start the dhcp server for this network if necessary
+ if FLAGS.quantum_use_dhcp:
+ self.enable_dhcp(context, quantum_net_id, network_ref,
+ vif_rec, project_id)
return self.get_instance_nw_info(context, instance_id,
instance_type_id, host)
+ def enable_dhcp(self, context, quantum_net_id, network_ref, vif_rec,
+ project_id):
+ LOG.info("Using DHCP for network: %s" % network_ref['label'])
+ # Figure out the ipam tenant id for this subnet: We need to
+ # query for the tenant_id since the network could be created
+ # with the project_id as the tenant or the default tenant.
+ ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+ quantum_net_id, vif_rec['uuid'], project_id)
+ # Figure out what subnets correspond to this network
+ v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+ ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
+ # Set up (or find) the dhcp server for each of the subnets
+ # returned above (both v4 and v6).
+ for subnet in [v4_subnet, v6_subnet]:
+ if subnet is None or subnet['cidr'] is None:
+ continue
+ # Fill in some of the network fields that we would have
+ # previously gotten from the network table (they'll be
+ # passed to the linux_net functions).
+ network_ref['cidr'] = subnet['cidr']
+ n = IPNetwork(subnet['cidr'])
+ network_ref['dhcp_server'] = IPAddress(n.first + 1)
+ # TODO(bgh): Melange should probably track dhcp_start
+ if 'dhcp_start' not in network_ref or \
+ network_ref['dhcp_start'] is None:
+ network_ref['dhcp_start'] = IPAddress(n.first + 2)
+ network_ref['broadcast'] = IPAddress(n.broadcast)
+ network_ref['gateway'] = subnet['gateway']
+ # Construct the interface id that we'll use for the bridge
+ interface_id = "gw-" + str(network_ref['uuid'][0:11])
+ network_ref['bridge'] = interface_id
+ # Query quantum to see if we've already created a port for
+ # the gateway device and attached the device to the port.
+ # If we haven't, then we need to initialize it and create
+ # it. This device will be the one serving dhcp via
+ # dnsmasq.
+ q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
+ port = self.q_conn.get_port_by_attachment(q_tenant_id,
+ quantum_net_id, interface_id)
+ if not port: # No dhcp server has been started
+ mac_address = self.generate_mac_address()
+ dev = self.driver.plug(network_ref, mac_address,
+ gateway=(network_ref['gateway'] is not None))
+ self.driver.initialize_gateway_device(dev, network_ref)
+ LOG.debug("Intializing DHCP for network: %s" %
+ network_ref)
+ self.q_conn.create_and_attach_port(q_tenant_id,
+ quantum_net_id, interface_id)
+ else: # We've already got one and it's plugged in
+ dev = interface_id
+
+ hosts = self.get_dhcp_hosts_text(context,
+ subnet['network_id'], project_id)
+ self.driver.update_dhcp_hostfile_with_text(dev, hosts)
+ self.driver.restart_dhcp(dev, network_ref)
+
+ def add_virtual_interface(self, context, instance_id, network_id):
+ # If we're not using melange, use the default means...
+ if FLAGS.use_melange_mac_generation:
+ return self._add_virtual_interface(context, instance_id,
+ network_id)
+
+ return super(QuantumManager, self).add_virtual_interface(context,
+ instance_id,
+ network_id)
+
+ def _add_virtual_interface(self, context, instance_id, network_id):
+ vif = {'instance_id': instance_id,
+ 'network_id': network_id,
+ 'uuid': str(utils.gen_uuid())}
+
+ # TODO(Vek): Ideally, we would have a VirtualInterface class
+ # that would take care of delegating to whoever it
+ # needs to get information from. We'll look at
+ # this after Trey's refactorings...
+ m_ipam = melange_ipam_lib.get_ipam_lib(self)
+ vif['address'] = m_ipam.create_vif(vif['uuid'],
+ vif['instance_id'],
+ context.project_id)
+
+ return self.db.virtual_interface_create(context, vif)
+
def get_instance_nw_info(self, context, instance_id,
instance_type_id, host):
"""This method is used by compute to fetch all network data
@@ -201,15 +354,9 @@ class QuantumManager(manager.FlatManager):
vifs = db.virtual_interface_get_by_instance(admin_context,
instance_id)
for vif in vifs:
- q_tenant_id = project_id
- ipam_tenant_id = project_id
- net_id, port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
- vif['uuid'])
- if not net_id:
- q_tenant_id = FLAGS.quantum_default_tenant_id
- ipam_tenant_id = None
- net_id, port_id = self.q_conn.get_port_by_attachment(
- q_tenant_id, vif['uuid'])
+ net = db.network_get(admin_context, vif['network_id'])
+ net_id = net['uuid']
+
if not net_id:
# TODO(bgh): We need to figure out a way to tell if we
# should actually be raising this exception or not.
@@ -219,8 +366,13 @@ class QuantumManager(manager.FlatManager):
# probably just log, continue, and move on.
raise Exception(_("No network for for virtual interface %s") %
vif['uuid'])
- (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(context,
- ipam_tenant_id, net_id)
+
+ ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+ net_id, vif['uuid'], project_id)
+ v4_subnet, v6_subnet = \
+ self.ipam.get_subnets_by_net_id(context,
+ ipam_tenant_id, net_id, vif['uuid'])
+
v4_ips = self.ipam.get_v4_ips_by_interface(context,
net_id, vif['uuid'],
project_id=ipam_tenant_id)
@@ -228,8 +380,6 @@ class QuantumManager(manager.FlatManager):
net_id, vif['uuid'],
project_id=ipam_tenant_id)
- quantum_net_id = v4_subnet['network_id'] or v6_subnet['network_id']
-
def ip_dict(ip, subnet):
return {
"ip": ip,
@@ -285,24 +435,35 @@ class QuantumManager(manager.FlatManager):
for vif_ref in vifs:
interface_id = vif_ref['uuid']
q_tenant_id = project_id
- ipam_tenant_id = project_id
- (net_id, port_id) = self.q_conn.get_port_by_attachment(q_tenant_id,
- interface_id)
- if not net_id:
+
+ network_ref = db.network_get(admin_context, vif_ref['network_id'])
+ net_id = network_ref['uuid']
+
+ port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
+ net_id, interface_id)
+ if not port_id:
q_tenant_id = FLAGS.quantum_default_tenant_id
- ipam_tenant_id = None
- (net_id, port_id) = self.q_conn.get_port_by_attachment(
- q_tenant_id, interface_id)
- if not net_id:
+ port_id = self.q_conn.get_port_by_attachment(
+ q_tenant_id, net_id, interface_id)
+
+ if not port_id:
LOG.error("Unable to find port with attachment: %s" %
(interface_id))
- continue
- self.q_conn.detach_and_delete_port(q_tenant_id,
- net_id, port_id)
+ else:
+ self.q_conn.detach_and_delete_port(q_tenant_id,
+ net_id, port_id)
+
+ ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+ net_id, vif_ref['uuid'], project_id)
self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
net_id, vif_ref)
+ # If DHCP is enabled on this network then we need to update the
+ # leases and restart the server.
+ if FLAGS.quantum_use_dhcp:
+ self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
+ project_id)
try:
db.virtual_interface_delete_by_instance(admin_context,
instance_id)
@@ -310,6 +471,37 @@ class QuantumManager(manager.FlatManager):
LOG.error(_("Attempted to deallocate non-existent instance: %s" %
(instance_id)))
+ # TODO(bgh): At some point we should consider merging enable_dhcp() and
+ # update_dhcp()
+ def update_dhcp(self, context, ipam_tenant_id, network_ref, vif_ref,
+ project_id):
+ # Figure out what subnet corresponds to this network/vif
+ v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+ ipam_tenant_id, network_ref['uuid'], vif_ref['uuid'])
+ for subnet in [v4_subnet, v6_subnet]:
+ if subnet is None:
+ continue
+ # Fill in some of the network fields that we would have
+ # previously gotten from the network table (they'll be
+ # passed to the linux_net functions).
+ network_ref['cidr'] = subnet['cidr']
+ n = IPNetwork(subnet['cidr'])
+ network_ref['dhcp_server'] = IPAddress(n.first + 1)
+ network_ref['dhcp_start'] = IPAddress(n.first + 2)
+ network_ref['broadcast'] = IPAddress(n.broadcast)
+ network_ref['gateway'] = IPAddress(n.first + 1)
+ dev = "gw-" + str(network_ref['uuid'][0:11])
+ # And remove the dhcp mappings for the subnet
+ hosts = self.get_dhcp_hosts_text(context,
+ subnet['network_id'], project_id)
+ self.driver.update_dhcp_hostfile_with_text(dev, hosts)
+ # Restart dnsmasq
+ self.driver.kill_dhcp(dev)
+ self.driver.restart_dhcp(dev, network_ref)
+
+ # TODO(bgh): if this is the last instance for the network
+ # then we should actually just kill the dhcp server.
+
def validate_networks(self, context, networks):
"""Validates that this tenant has quantum networks with the associated
UUIDs. This is called by the 'os-create-server-ext' API extension
@@ -321,6 +513,50 @@ class QuantumManager(manager.FlatManager):
project_id = context.project_id
for (net_id, _i) in networks:
- self.ipam.verify_subnet_exists(context, project_id, net_id)
+ # TODO(bgh): At some point we should figure out whether or
+ # not we want the verify_subnet_exists call to be optional.
+ if not self.ipam.verify_subnet_exists(context, project_id,
+ net_id):
+ raise exception.NetworkNotFound(network_id=net_id)
if not self.q_conn.network_exists(project_id, net_id):
raise exception.NetworkNotFound(network_id=net_id)
+
+ # NOTE(bgh): deallocate_for_instance will take care of this. We override
+ # release_fixed_ip() so that NetworkManager's version isn't called: it
+ # performs database operations that we don't want, and since our
+ # deallocate_for_instance call already covers most of what it does,
+ # we don't need to do anything here.
+ def release_fixed_ip(self, context, address):
+ pass
+
+ def get_dhcp_hosts_text(self, context, subnet_id, project_id=None):
+ ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
+ hosts_text = ""
+ admin_context = context.elevated()
+ for ip in ips:
+ address, vif_id = ip
+ vif = db.virtual_interface_get_by_uuid(admin_context, vif_id)
+ mac_address = vif['address']
+ text = "%s,%s.%s,%s\n" % (mac_address, "host-" + address,
+ FLAGS.dhcp_domain, address)
+ hosts_text += text
+ LOG.debug("DHCP hosts: %s" % hosts_text)
+ return hosts_text
+
+ def get_dhcp_leases(self, context, network_ref):
+ """Return a network's hosts config in dnsmasq leasefile format."""
+ subnet_id = network_ref['uuid']
+ project_id = network_ref['project_id']
+ ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
+ leases_text = ""
+ admin_context = context.elevated()
+ for ip in ips:
+ address, vif_id = ip
+ vif = db.virtual_interface_get_by_uuid(admin_context, vif_id)
+ mac_address = vif['address']
+ text = "%s %s %s %s *\n" % \
+ (int(time.time()) - FLAGS.dhcp_lease_time,
+ mac_address, address, '*')
+ leases_text += text
+ LOG.debug("DHCP leases: %s" % leases_text)
+ return leases_text
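For reference, the two helpers above emit standard dnsmasq file formats. A minimal sketch of the lines they produce (the MAC, address, domain, and lease time below are illustrative stand-ins for the vif data and FLAGS values, not taken from this diff):

    import time

    mac_address = "02:16:3e:4b:12:9a"   # hypothetical vif address
    address = "10.0.0.3"                # hypothetical allocated IP
    dhcp_domain = "novalocal"           # stands in for FLAGS.dhcp_domain
    dhcp_lease_time = 120               # stands in for FLAGS.dhcp_lease_time

    # get_dhcp_hosts_text(): one --dhcp-hostsfile entry per allocation.
    hosts_line = "%s,%s.%s,%s\n" % (mac_address, "host-" + address,
                                    dhcp_domain, address)
    # -> "02:16:3e:4b:12:9a,host-10.0.0.3.novalocal,10.0.0.3"

    # get_dhcp_leases(): one leasefile entry per allocation; the timestamp
    # is back-dated by dhcp_lease_time, mirroring the code above.
    lease_line = "%s %s %s %s *\n" % (int(time.time()) - dhcp_lease_time,
                                      mac_address, address, '*')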
diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py
index 71ac9b5f1..a336f9a7c 100644
--- a/nova/network/quantum/melange_connection.py
+++ b/nova/network/quantum/melange_connection.py
@@ -66,11 +66,12 @@ class MelangeConnection(object):
else:
return httplib.HTTPConnection(self.host, self.port)
- def do_request(self, method, path, body=None, headers=None, params=None):
+ def do_request(self, method, path, body=None, headers=None, params=None,
+ content_type=".json"):
headers = headers or {}
params = params or {}
- url = "/%s/%s.json" % (self.version, path)
+ url = "/%s/%s%s" % (self.version, path, content_type)
if params:
url += "?%s" % urllib.urlencode(params)
try:
@@ -98,13 +99,14 @@ class MelangeConnection(object):
return json.loads(response)['ip_addresses']
def create_block(self, network_id, cidr,
- project_id=None, dns1=None, dns2=None):
+ project_id=None, gateway=None, dns1=None, dns2=None):
tenant_scope = "/tenants/%s" % project_id if project_id else ""
url = "ipam%(tenant_scope)s/ip_blocks" % locals()
req_params = dict(ip_block=dict(cidr=cidr, network_id=network_id,
- type='private', dns1=dns1, dns2=dns2))
+ type='private', gateway=gateway,
+ dns1=dns1, dns2=dns2))
self.post(url, body=json.dumps(req_params),
headers=json_content_type)
@@ -132,6 +134,14 @@ class MelangeConnection(object):
response = self.get(url, headers=json_content_type)
return json.loads(response)['ip_addresses']
+ def get_allocated_ips_for_network(self, network_id, project_id=None):
+ tenant_scope = "/tenants/%s" % project_id if project_id else ""
+ url = ("ipam%(tenant_scope)s/allocated_ip_addresses" % locals())
+ # TODO(bgh): This request fails if ".json" is appended to the URL, so
+ # this method calls do_request() directly with an empty content_type.
+ # Melange bug?
+ response = self.do_request("GET", url, content_type="")
+ return json.loads(response)['ip_addresses']
+
def deallocate_ips(self, network_id, vif_id, project_id=None):
tenant_scope = "/tenants/%s" % project_id if project_id else ""
@@ -139,3 +149,14 @@ class MelangeConnection(object):
"interfaces/%(vif_id)s/ip_allocations" % locals())
self.delete(url, headers=json_content_type)
+
+ def create_vif(self, vif_id, instance_id, project_id=None):
+ url = "ipam/interfaces"
+
+ request_body = dict(interface=dict(id=vif_id, tenant_id=project_id,
+ device_id=instance_id))
+
+ response = self.post(url, body=json.dumps(request_body),
+ headers=json_content_type)
+
+ return json.loads(response)['interface']['mac_address']
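A hedged usage sketch of the MelangeConnection additions above; the constructor arguments, endpoint, and IDs are assumptions for illustration, not part of this diff:

    # Assumes a Melange endpoint reachable at 127.0.0.1:9898.
    conn = MelangeConnection(host="127.0.0.1", port=9898)

    # create_block() now forwards an explicit IPv4 gateway to Melange.
    conn.create_block("net-uuid", "10.0.0.0/24", project_id="proj-1",
                      gateway="10.0.0.1", dns1="8.8.8.8")

    # Listing allocated IPs must skip the ".json" suffix (see the TODO
    # above), hence the content_type="" override on do_request().
    ips = conn.get_allocated_ips_for_network("net-uuid", project_id="proj-1")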
diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py
index a0ac10fd3..155384b53 100644
--- a/nova/network/quantum/melange_ipam_lib.py
+++ b/nova/network/quantum/melange_ipam_lib.py
@@ -15,8 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from netaddr import IPNetwork
-
+from netaddr import IPNetwork, IPAddress
from nova import db
from nova import exception
from nova import flags
@@ -45,7 +44,7 @@ class QuantumMelangeIPAMLib(object):
def create_subnet(self, context, label, project_id,
quantum_net_id, priority, cidr=None,
- gateway_v6=None, cidr_v6=None,
+ gateway=None, gateway_v6=None, cidr_v6=None,
dns1=None, dns2=None):
"""Contact Melange and create a subnet for any non-NULL
IPv4 or IPv6 subnets.
@@ -59,25 +58,34 @@ class QuantumMelangeIPAMLib(object):
if cidr:
self.m_conn.create_block(quantum_net_id, cidr,
project_id=tenant_id,
+ gateway=gateway,
dns1=dns1, dns2=dns2)
if cidr_v6:
self.m_conn.create_block(quantum_net_id, cidr_v6,
project_id=tenant_id,
+ gateway=gateway_v6,
dns1=dns1, dns2=dns2)
net = {"uuid": quantum_net_id,
"project_id": project_id,
"priority": priority,
"label": label}
+ if FLAGS.quantum_use_dhcp:
+ if cidr:
+ n = IPNetwork(cidr)
+ net['dhcp_start'] = IPAddress(n.first + 2)
+ else:
+ net['dhcp_start'] = None
admin_context = context.elevated()
network = db.network_create_safe(admin_context, net)
def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref):
"""Pass call to allocate fixed IP on to Melange"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
- self.m_conn.allocate_ip(quantum_net_id,
- vif_ref['uuid'], project_id=tenant_id,
- mac_address=vif_ref['address'])
+ ip = self.m_conn.allocate_ip(quantum_net_id,
+ vif_ref['uuid'], project_id=tenant_id,
+ mac_address=vif_ref['address'])
+ return ip[0]['address']
def get_network_id_by_cidr(self, context, cidr, project_id):
"""Find the Quantum UUID associated with a IPv4 CIDR
@@ -86,6 +94,7 @@ class QuantumMelangeIPAMLib(object):
tenant_id = project_id or FLAGS.quantum_default_tenant_id
all_blocks = self.m_conn.get_blocks(tenant_id)
for b in all_blocks['ip_blocks']:
+ LOG.debug("block: %s" % b)
if b['cidr'] == cidr:
return b['network_id']
raise exception.NotFound(_("No network found for cidr %s" % cidr))
@@ -134,34 +143,43 @@ class QuantumMelangeIPAMLib(object):
return [(network_id, tenant_id)
for priority, network_id, tenant_id in priority_nets]
- def get_subnets_by_net_id(self, context, project_id, net_id):
+ def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
+ ipam_tenant_id = None
+ tenant_ids = [FLAGS.quantum_default_tenant_id, project_id, None]
+ for tid in tenant_ids:
+ try:
+ ips = self.m_conn.get_allocated_ips(net_id, vif_id, tid)
+ except Exception:
+ continue
+ ipam_tenant_id = tid
+ break
+ return ipam_tenant_id
+
+ # TODO(bgh): Rename this method .. it's now more of a
+ # "get_subnets_by_net_id_and_vif_id" method, but we could probably just
+ # call it "get_subnets".
+ def get_subnets_by_net_id(self, context, tenant_id, net_id, vif_id):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Quantum Network UUID.
"""
-
- # FIXME(danwent): Melange actually returns the subnet info
- # when we query for a particular interface. We may want to
- # rework the ipam_manager python API to let us take advantage of
- # this, as right now we have to get all blocks and cycle through
- # them.
subnet_v4 = None
subnet_v6 = None
- tenant_id = project_id or FLAGS.quantum_default_tenant_id
- all_blocks = self.m_conn.get_blocks(tenant_id)
- for b in all_blocks['ip_blocks']:
- if b['network_id'] == net_id:
- subnet = {'network_id': b['network_id'],
- 'cidr': b['cidr'],
- 'gateway': b['gateway'],
- 'broadcast': b['broadcast'],
- 'netmask': b['netmask'],
- 'dns1': b['dns1'],
- 'dns2': b['dns2']}
-
- if IPNetwork(b['cidr']).version == 6:
- subnet_v6 = subnet
- else:
- subnet_v4 = subnet
+ ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
+
+ for ip_address in ips:
+ block = ip_address['ip_block']
+ LOG.debug("block: %s" % block)
+ subnet = {'network_id': block['id'],
+ 'cidr': block['cidr'],
+ 'gateway': block['gateway'],
+ 'broadcast': block['broadcast'],
+ 'netmask': block['netmask'],
+ 'dns1': block['dns1'],
+ 'dns2': block['dns2']}
+ if ip_address['version'] == 4:
+ subnet_v4 = subnet
+ else:
+ subnet_v6 = subnet
return (subnet_v4, subnet_v6)
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
@@ -179,7 +197,7 @@ class QuantumMelangeIPAMLib(object):
project_id, 6)
def _get_ips_by_interface(self, context, net_id, vif_id, project_id,
- ip_version):
+ ip_version):
"""Helper method to fetch v4 or v6 addresses for a particular
virtual interface.
"""
@@ -192,10 +210,16 @@ class QuantumMelangeIPAMLib(object):
"""Confirms that a subnet exists that is associated with the
specified Quantum Network UUID.
"""
+ # TODO(bgh): Would be nice if we could just do something like:
+ # GET /ipam/tenants/{tenant_id}/networks/{network_id}/ instead
+ # of searching through all the blocks. Checking for a 404
+ # would then determine whether it exists.
tenant_id = project_id or FLAGS.quantum_default_tenant_id
- v4_subnet, v6_subnet = self.get_subnets_by_net_id(context, tenant_id,
- quantum_net_id)
- return v4_subnet is not None
+ all_blocks = self.m_conn.get_blocks(tenant_id)
+ for b in all_blocks['ip_blocks']:
+ if b['network_id'] == quantum_net_id:
+ return True
+ return False
def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref):
"""Deallocate all fixed IPs associated with the specified
@@ -203,3 +227,13 @@ class QuantumMelangeIPAMLib(object):
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id)
+
+ def get_allocated_ips(self, context, subnet_id, project_id):
+ ips = self.m_conn.get_allocated_ips_for_network(subnet_id, project_id)
+ return [(ip['address'], ip['interface_id']) for ip in ips]
+
+ def create_vif(self, vif_id, instance_id, project_id=None):
+ """Create a new vif with the specified information.
+ """
+ tenant_id = project_id or FLAGS.quantum_default_tenant_id
+ return self.m_conn.create_vif(vif_id, instance_id, tenant_id)
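The tenant lookup in get_tenant_id_by_net_id() above is trial-and-error; stripped of the Melange specifics, the probe order reduces to the sketch below (the function name is ours, not the library's):

    def first_owning_tenant(m_conn, net_id, vif_id, project_id,
                            default_tenant):
        # Try the default tenant first, then the project, then unscoped.
        for tid in (default_tenant, project_id, None):
            try:
                m_conn.get_allocated_ips(net_id, vif_id, tid)
            except Exception:
                continue  # this tenant doesn't own the allocation
            return tid
        return None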
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
index 21dee8f6a..ded5bef58 100644
--- a/nova/network/quantum/nova_ipam_lib.py
+++ b/nova/network/quantum/nova_ipam_lib.py
@@ -51,7 +51,7 @@ class QuantumNovaIPAMLib(object):
def create_subnet(self, context, label, tenant_id,
quantum_net_id, priority, cidr=None,
- gateway_v6=None, cidr_v6=None,
+ gateway=None, gateway_v6=None, cidr_v6=None,
dns1=None, dns2=None):
"""Re-use the basic FlatManager create_networks method to
initialize the networks and fixed_ips tables in Nova DB.
@@ -63,7 +63,7 @@ class QuantumNovaIPAMLib(object):
subnet_size = len(netaddr.IPNetwork(cidr))
networks = manager.FlatManager.create_networks(self.net_manager,
admin_context, label, cidr,
- False, 1, subnet_size, cidr_v6,
+ False, 1, subnet_size, cidr_v6, gateway,
gateway_v6, quantum_net_id, None, dns1, dns2)
if len(networks) != 1:
@@ -93,7 +93,8 @@ class QuantumNovaIPAMLib(object):
if not network:
raise Exception(_("No network with net_id = %s" % net_id))
manager.FlatManager.delete_network(self.net_manager,
- admin_context, network['cidr'],
+ admin_context, None,
+ network['uuid'],
require_disassociated=False)
def get_project_and_global_net_ids(self, context, project_id):
@@ -116,6 +117,7 @@ class QuantumNovaIPAMLib(object):
"""Allocates a single fixed IPv4 address for a virtual interface."""
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, quantum_net_id)
+ address = None
if network['cidr']:
address = db.fixed_ip_associate_pool(admin_context,
network['id'],
@@ -123,8 +125,15 @@ class QuantumNovaIPAMLib(object):
values = {'allocated': True,
'virtual_interface_id': vif_rec['id']}
db.fixed_ip_update(admin_context, address, values)
+ return address
- def get_subnets_by_net_id(self, context, tenant_id, net_id):
+ def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
+ """Returns tenant_id for this network. This is only necessary
+ in the melange IPAM case.
+ """
+ return project_id
+
+ def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Quantum Network UUID.
"""
@@ -176,7 +185,8 @@ class QuantumNovaIPAMLib(object):
such subnet exists.
"""
admin_context = context.elevated()
- db.network_get_by_uuid(admin_context, quantum_net_id)
+ net = db.network_get_by_uuid(admin_context, quantum_net_id)
+ return net is not None
def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
"""Deallocate all fixed IPs associated with the specified
@@ -193,3 +203,20 @@ class QuantumNovaIPAMLib(object):
except exception.FixedIpNotFoundForInstance:
LOG.error(_('No fixed IPs to deallocate for vif %s' %
vif_ref['id']))
+
+ def get_allocated_ips(self, context, subnet_id, project_id):
+ """Returns a list of (ip, vif_id) pairs"""
+ admin_context = context.elevated()
+ ips = db.fixed_ip_get_all(admin_context)
+ allocated_ips = []
+ # Get all allocated IPs that are part of this subnet
+ network = db.network_get_by_uuid(admin_context, subnet_id)
+ for ip in ips:
+ # Skip unallocated IPs
+ if ip['allocated'] != 1:
+ continue
+ if ip['network_id'] == network['id']:
+ vif = db.virtual_interface_get(admin_context,
+ ip['virtual_interface_id'])
+ allocated_ips.append((ip['address'], vif['uuid']))
+ return allocated_ips
diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py
index ce07bc1ab..91c98797c 100644
--- a/nova/network/quantum/quantum_connection.py
+++ b/nova/network/quantum/quantum_connection.py
@@ -79,6 +79,10 @@ class QuantumClientConnection(object):
# Not really an error. Real errors will be propagated to the caller
return False
+ def get_networks(self, tenant_id):
+ """Retrieve all networks for this tenant"""
+ return self.client.list_networks(tenant=tenant_id)
+
def create_and_attach_port(self, tenant_id, net_id, interface_id):
"""Creates a Quantum port on the specified network, sets
status to ACTIVE to enable traffic, and attaches the
@@ -102,21 +106,20 @@ class QuantumClientConnection(object):
self.client.detach_resource(net_id, port_id, tenant=tenant_id)
self.client.delete_port(net_id, port_id, tenant=tenant_id)
- def get_port_by_attachment(self, tenant_id, attachment_id):
- """Given a tenant, search for the Quantum network and port
- UUID that has the specified interface-id attachment.
+ def get_port_by_attachment(self, tenant_id, net_id, attachment_id):
+ """Given a tenant and network, search for the port UUID that
+ has the specified interface-id attachment.
"""
# FIXME(danwent): this will be inefficient until the Quantum
# API implements querying a port by the interface-id
- net_list_resdict = self.client.list_networks(tenant=tenant_id)
- for n in net_list_resdict["networks"]:
- net_id = n['id']
- port_list_resdict = self.client.list_ports(net_id,
- tenant=tenant_id)
- for p in port_list_resdict["ports"]:
- port_id = p["id"]
- port_get_resdict = self.client.show_port_attachment(net_id,
+ port_list_resdict = self.client.list_ports(net_id, tenant=tenant_id)
+ for p in port_list_resdict["ports"]:
+ port_id = p["id"]
+ port_get_resdict = self.client.show_port_attachment(net_id,
port_id, tenant=tenant_id)
- if attachment_id == port_get_resdict["attachment"]["id"]:
- return (net_id, port_id)
- return (None, None)
+ # Skip ports without an attachment
+ if "id" not in port_get_resdict["attachment"]:
+ continue
+ if attachment_id == port_get_resdict["attachment"]["id"]:
+ return port_id
+ return None
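With the narrowed signature, callers must already know the network UUID. A condensed sketch of how the deallocation path earlier in this diff drives it, including the fall-back to the default tenant:

    q_tenant_id = project_id
    port_id = q_conn.get_port_by_attachment(q_tenant_id, net_id, vif_uuid)
    if not port_id:
        q_tenant_id = FLAGS.quantum_default_tenant_id
        port_id = q_conn.get_port_by_attachment(q_tenant_id, net_id,
                                                vif_uuid)
    if port_id:
        q_conn.detach_and_delete_port(q_tenant_id, net_id, port_id)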
diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py
index 955ae1b57..62847c85f 100644
--- a/nova/notifier/list_notifier.py
+++ b/nova/notifier/list_notifier.py
@@ -53,7 +53,7 @@ def _get_drivers():
def notify(message):
- """Passes notification to mulitple notifiers in a list."""
+ """Passes notification to multiple notifiers in a list."""
for driver in _get_drivers():
try:
driver.notify(message)
diff --git a/nova/quota.py b/nova/quota.py
index 771477747..e9ec4142f 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -162,8 +162,3 @@ def allowed_injected_file_content_bytes(context, requested_bytes):
def allowed_injected_file_path_bytes(context):
"""Return the number of bytes allowed in an injected file path."""
return FLAGS.quota_max_injected_file_path_bytes
-
-
-class QuotaError(exception.ApiError):
- """Quota Exceeded."""
- pass
diff --git a/nova/rpc/common.py b/nova/rpc/common.py
index 120d6ad14..a7597d29b 100644
--- a/nova/rpc/common.py
+++ b/nova/rpc/common.py
@@ -13,10 +13,10 @@ flags.DEFINE_integer('rpc_conn_pool_size', 30,
class RemoteError(exception.NovaException):
"""Signifies that a remote class has raised an exception.
- Containes a string representation of the type of the original exception,
+ Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
- contains all of the relevent info.
+ contains all of the relevant info.
"""
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py
index 303a4ff88..2a518d7d7 100644
--- a/nova/rpc/impl_carrot.py
+++ b/nova/rpc/impl_carrot.py
@@ -274,7 +274,7 @@ class AdapterConsumer(Consumer):
@exception.wrap_exception()
def _process_data(self, msg_id, ctxt, method, args):
- """Thread that maigcally looks for a method on the proxy
+ """Thread that magically looks for a method on the proxy
object and calls it.
"""
diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
index e7fc491ea..74c8cf412 100644
--- a/nova/rpc/impl_kombu.py
+++ b/nova/rpc/impl_kombu.py
@@ -319,7 +319,7 @@ class Connection(object):
self.reconnect()
def reconnect(self):
- """Handles reconnecting and re-estblishing queues"""
+ """Handles reconnecting and re-establishing queues"""
if self.connection:
try:
self.connection.close()
@@ -609,7 +609,7 @@ class ProxyCallback(object):
@exception.wrap_exception()
def _process_data(self, ctxt, method, args):
- """Thread that maigcally looks for a method on the proxy
+ """Thread that magically looks for a method on the proxy
object and calls it.
"""
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 0b91edccc..dae5b1d8f 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -60,7 +60,7 @@ def get_host_list(context):
def get_zone_list(context):
- """Return a list of zones assoicated with this zone."""
+ """Return a list of zones associated with this zone."""
items = _call_scheduler('get_zone_list', context)
for item in items:
item['api_url'] = item['api_url'].replace('\\/', '/')
@@ -332,6 +332,12 @@ class reroute_compute(object):
context and resource id. Derived class should override this."""
context = kwargs.get('context', None)
instance_id = kwargs.get('instance_id', None)
+
+ # NOTE(blamar): This is going to get worse before it gets better...
+ instance = kwargs.get('instance', None)
+ if instance is not None:
+ instance_id = instance['uuid']
+
if len(args) > 0 and not context:
context = args[1]
if len(args) > 1 and not instance_id:
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 8a69f2d0f..e387ab862 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -23,6 +23,7 @@ Chance (Random) Scheduler implementation
import random
+from nova import exception
from nova.scheduler import driver
@@ -35,9 +36,8 @@ class ChanceScheduler(driver.Scheduler):
elevated = context.elevated()
hosts = self.hosts_up(elevated, topic)
if not hosts:
- raise driver.NoValidHost(_("Scheduler was unable to locate a host"
- " for this request. Is the appropriate"
- " service running?"))
+ msg = _("Is the appropriate service running?")
+ raise exception.NoValidHost(reason=msg)
return hosts[int(random.random() * len(hosts))]
def schedule(self, context, topic, method, *_args, **kwargs):
diff --git a/nova/scheduler/distributed_scheduler.py b/nova/scheduler/distributed_scheduler.py
index db1e1ed41..b8fbd55fc 100644
--- a/nova/scheduler/distributed_scheduler.py
+++ b/nova/scheduler/distributed_scheduler.py
@@ -25,21 +25,21 @@ import types
import M2Crypto
+from nova.compute import api as compute_api
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
-
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
-
-from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import filters
from nova.scheduler import least_cost
+from nova.scheduler import scheduler_options
+from nova import utils
FLAGS = flags.FLAGS
@@ -59,6 +59,10 @@ class DistributedScheduler(driver.Scheduler):
"""Scheduler that can work across any nova deployment, from simple
deployments to multiple nested zones.
"""
+ def __init__(self, *args, **kwargs):
+ super(DistributedScheduler, self).__init__(*args, **kwargs)
+ self.cost_function_cache = {}
+ self.options = scheduler_options.SchedulerOptions()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
@@ -67,7 +71,7 @@ class DistributedScheduler(driver.Scheduler):
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
msg = _("No host selection for %s defined." % topic)
- raise driver.NoValidHost(msg)
+ raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
@@ -99,7 +103,7 @@ class DistributedScheduler(driver.Scheduler):
*args, **kwargs)
if not weighted_hosts:
- raise driver.NoValidHost(_('No hosts were available'))
+ raise exception.NoValidHost(reason=_(""))
instances = []
for num in xrange(num_instances):
@@ -243,6 +247,10 @@ class DistributedScheduler(driver.Scheduler):
"""Broken out for testing."""
return db.zone_get_all(context)
+ def _get_configuration_options(self):
+ """Fetch options dictionary. Broken out for testing."""
+ return self.options.get_configuration()
+
def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
@@ -257,9 +265,13 @@ class DistributedScheduler(driver.Scheduler):
"provisioning.")
raise NotImplementedError(msg)
+ cost_functions = self.get_cost_functions()
+
ram_requirement_mb = instance_type['memory_mb']
disk_requirement_bg = instance_type['local_gb']
+ options = self._get_configuration_options()
+
# Find our local list of acceptable hosts by repeatedly
# filtering and weighing our options. Each time we choose a
# host, we virtually consume resources on it so subsequent
@@ -274,7 +286,7 @@ class DistributedScheduler(driver.Scheduler):
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
filtered_hosts = self._filter_hosts(topic, request_spec,
- unfiltered_hosts)
+ unfiltered_hosts, options)
if not filtered_hosts:
# Can't get any more locally.
@@ -284,8 +296,8 @@ class DistributedScheduler(driver.Scheduler):
# weighted_host = WeightedHost() ... the best
# host for the job.
- weighted_host = least_cost.weigh_hosts(request_spec,
- filtered_hosts)
+ weighted_host = least_cost.weighted_sum(cost_functions,
+ filtered_hosts, options)
LOG.debug(_("Weighted %(weighted_host)s") % locals())
selected_hosts.append(weighted_host)
@@ -343,7 +355,7 @@ class DistributedScheduler(driver.Scheduler):
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
- def _filter_hosts(self, topic, request_spec, hosts=None):
+ def _filter_hosts(self, topic, request_spec, hosts, options):
"""Filter the full host list. hosts = [(host, HostInfo()), ...].
This method returns a subset of hosts, in the same format."""
selected_filters = self._choose_host_filters()
@@ -358,6 +370,48 @@ class DistributedScheduler(driver.Scheduler):
for selected_filter in selected_filters:
query = selected_filter.instance_type_to_filter(instance_type)
- hosts = selected_filter.filter_hosts(hosts, query)
+ hosts = selected_filter.filter_hosts(hosts, query, options)
return hosts
+
+ def get_cost_functions(self, topic=None):
+ """Returns a list of tuples containing weights and cost functions to
use for weighing hosts.
+ """
+ if topic is None:
+ # Schedulers only support compute right now.
+ topic = "compute"
+ if topic in self.cost_function_cache:
+ return self.cost_function_cache[topic]
+
+ cost_fns = []
+ for cost_fn_str in FLAGS.least_cost_functions:
+ if '.' in cost_fn_str:
+ short_name = cost_fn_str.split('.')[-1]
+ else:
+ short_name = cost_fn_str
+ cost_fn_str = "%s.%s.%s" % (
+ __name__, self.__class__.__name__, short_name)
+ if not (short_name.startswith('%s_' % topic) or
+ short_name.startswith('noop')):
+ continue
+
+ try:
+ # NOTE: import_class is somewhat misnamed since
+ # the weighing function can be any non-class callable
+ # (i.e., no 'self')
+ cost_fn = utils.import_class(cost_fn_str)
+ except exception.ClassNotFound:
+ raise exception.SchedulerCostFunctionNotFound(
+ cost_fn_str=cost_fn_str)
+
+ try:
+ flag_name = "%s_weight" % cost_fn.__name__
+ weight = getattr(FLAGS, flag_name)
+ except AttributeError:
+ raise exception.SchedulerWeightFlagNotFound(
+ flag_name=flag_name)
+ cost_fns.append((weight, cost_fn))
+
+ self.cost_function_cache[topic] = cost_fns
+ return cost_fns
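A hedged sketch of what the naming convention enforced above requires from a new cost function: the callable's name must start with the topic prefix (or 'noop'), take (host_info, options), and have a matching '<name>_weight' flag. Everything below is hypothetical, not part of this diff:

    flags.DEFINE_float('compute_example_cost_fn_weight', 1.0,
                       'Weight for the hypothetical example cost function')

    def compute_example_cost_fn(host_info, options=None):
        # Constant cost: every host scores the same from this function,
        # leaving the ordering to the other weighted functions.
        return 1

It would be enabled by listing its dotted path in --least_cost_functions; get_cost_functions() then resolves the weight from the flag named after the callable.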
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 7c79d28c9..5041c3346 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -36,7 +36,7 @@ from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.driver')
flags.DEFINE_integer('service_down_time', 60,
- 'maximum time since last checkin for up service')
+ 'maximum time since last check-in for up service')
flags.DECLARE('instances_path', 'nova.compute.manager')
@@ -115,18 +115,8 @@ def encode_instance(instance, local=True):
return instance
-class NoValidHost(exception.Error):
- """There is no valid host for the command."""
- pass
-
-
-class WillNotSchedule(exception.Error):
- """The specified host is not up or doesn't exist."""
- pass
-
-
class Scheduler(object):
- """The base class that all Scheduler clases should inherit from."""
+ """The base class that all Scheduler classes should inherit from."""
def __init__(self):
self.zone_manager = None
@@ -155,6 +145,9 @@ class Scheduler(object):
def create_instance_db_entry(self, context, request_spec):
"""Create instance DB entry based on request_spec"""
base_options = request_spec['instance_properties']
+ if base_options.get('id'):
+ # Instance was already created before calling scheduler
+ return db.instance_get(context, base_options['id'])
image = request_spec['image']
instance_type = request_spec.get('instance_type')
security_group = request_spec.get('security_group', 'default')
@@ -418,7 +411,7 @@ class Scheduler(object):
raise exception.MigrationError(reason=reason % locals())
def _get_compute_info(self, context, host, key):
- """get compute node's infomation specified by key
+ """get compute node's information specified by key
:param context: security context
:param host: hostname(must be compute node)
diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py
index 5784f8791..45fcd3ea1 100644
--- a/nova/scheduler/filters/abstract_filter.py
+++ b/nova/scheduler/filters/abstract_filter.py
@@ -23,7 +23,7 @@ class AbstractHostFilter(object):
"""Convert instance_type into a filter for most common use-case."""
raise NotImplementedError()
- def filter_hosts(self, host_list, query):
+ def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that fulfill the filter."""
raise NotImplementedError()
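Every filter now receives the scheduler-options dict alongside the query. A minimal hypothetical filter showing the extended contract (the 'max_hosts' key is an assumed entry in the JSON options file, not one this diff defines):

    from nova.scheduler.filters import abstract_filter

    class MaxHostsFilter(abstract_filter.AbstractHostFilter):
        """Hypothetical filter: trim candidates using the options dict."""
        def instance_type_to_filter(self, instance_type):
            return instance_type

        def filter_hosts(self, host_list, query, options):
            # 'max_hosts' is an assumed key in the JSON options file.
            limit = options.get('max_hosts')
            hosts = list(host_list)
            return hosts[:limit] if limit else hosts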
diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py
index 7e6d2573b..6ca4c281e 100644
--- a/nova/scheduler/filters/all_hosts_filter.py
+++ b/nova/scheduler/filters/all_hosts_filter.py
@@ -26,6 +26,6 @@ class AllHostsFilter(abstract_filter.AbstractHostFilter):
"""
return instance_type
- def filter_hosts(self, host_list, query):
+ def filter_hosts(self, host_list, query, options):
"""Return the entire list of supplied hosts."""
return list(host_list)
diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py
index 2104924d7..7e05cb9e7 100644
--- a/nova/scheduler/filters/instance_type_filter.py
+++ b/nova/scheduler/filters/instance_type_filter.py
@@ -51,7 +51,7 @@ class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
free_ram_mb = host_info.free_ram_mb
return free_ram_mb >= requested_ram
- def filter_hosts(self, host_list, query):
+ def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that can create instance_type."""
instance_type = query
selected_hosts = []
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
index 5eea38bc7..692a330bb 100644
--- a/nova/scheduler/filters/json_filter.py
+++ b/nova/scheduler/filters/json_filter.py
@@ -134,7 +134,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
result = method(self, cooked_args)
return result
- def filter_hosts(self, host_list, query):
+ def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 83dc087ab..7e12ca390 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -23,11 +23,8 @@ is then selected for provisioning.
"""
-import collections
-
from nova import flags
from nova import log as logging
-from nova import utils
from nova import exception
LOG = logging.getLogger('nova.scheduler.least_cost')
@@ -46,9 +43,6 @@ flags.DEFINE_float('compute_fill_first_cost_fn_weight', 1.0,
'How much weight to give the fill-first cost function')
-COST_FUNCTION_CACHE = {}
-
-
class WeightedHost(object):
"""Reduced set of information about a host that has been weighed.
This is an attempt to remove some of the ad-hoc dict structures
@@ -74,36 +68,18 @@ class WeightedHost(object):
return x
-def noop_cost_fn(host_info):
+def noop_cost_fn(host_info, options=None):
"""Return a pre-weight cost of 1 for each host"""
return 1
-def compute_fill_first_cost_fn(host_info):
+def compute_fill_first_cost_fn(host_info, options=None):
"""More free ram = higher weight. So servers will less free
ram will be preferred."""
return host_info.free_ram_mb
-def normalize_grid(grid):
- """Normalize a grid of numbers by row."""
- if not grid:
- return [[]]
-
- normalized = []
- for row in grid:
- if not row:
- normalized.append([])
- continue
- mx = float(max(row))
- if abs(mx) < 0.001:
- normalized = [0.0] * len(row)
- continue
- normalized.append([float(col) / mx for col in row])
- return normalized
-
-
-def weighted_sum(host_list, weighted_fns):
+def weighted_sum(weighted_fns, host_list, options):
"""Use the weighted-sum method to compute a score for an array of objects.
Normalize the results of the objective-functions so that the weights are
meaningful regardless of objective-function's range.
@@ -111,6 +87,7 @@ def weighted_sum(host_list, weighted_fns):
host_list - [(host, HostInfo()), ...]
weighted_fns - list of weights and functions like:
[(weight, objective-functions), ...]
+ options is an arbitrary dict of values.
Returns a single WeightedHost object which represents the best
candidate.
@@ -120,8 +97,8 @@ def weighted_sum(host_list, weighted_fns):
# One row per host. One column per function.
scores = []
for weight, fn in weighted_fns:
- scores.append([fn(host_info) for hostname, host_info in host_list])
- scores = normalize_grid(scores)
+ scores.append([fn(host_info, options) for hostname, host_info
+ in host_list])
# Adjust the weights in the grid by each function's weight adjustment
# and sum them up to get a final list of weights.
@@ -143,54 +120,3 @@ def weighted_sum(host_list, weighted_fns):
final_scores = sorted(final_scores)
weight, (host, hostinfo) = final_scores[0] # Lowest score is the winner!
return WeightedHost(weight, host=host, hostinfo=hostinfo)
-
-
-def get_cost_fns(topic=None):
- """Returns a list of tuples containing weights and cost functions to
- use for weighing hosts
- """
- global COST_FUNCTION_CACHE
- cost_function_cache = COST_FUNCTION_CACHE
-
- if topic is None:
- # Schedulers only support compute right now.
- topic = "compute"
- if topic in cost_function_cache:
- return cost_function_cache[topic]
-
- cost_fns = []
- for cost_fn_str in FLAGS.least_cost_functions:
- if '.' in cost_fn_str:
- short_name = cost_fn_str.split('.')[-1]
- else:
- short_name = cost_fn_str
- cost_fn_str = "%s.%s.%s" % (
- __name__, self.__class__.__name__, short_name)
- if not (short_name.startswith('%s_' % topic) or
- short_name.startswith('noop')):
- continue
-
- try:
- # NOTE(sirp): import_class is somewhat misnamed since it can
- # any callable from a module
- cost_fn = utils.import_class(cost_fn_str)
- except exception.ClassNotFound:
- raise exception.SchedulerCostFunctionNotFound(
- cost_fn_str=cost_fn_str)
-
- try:
- flag_name = "%s_weight" % cost_fn.__name__
- weight = getattr(FLAGS, flag_name)
- except AttributeError:
- raise exception.SchedulerWeightFlagNotFound(
- flag_name=flag_name)
- cost_fns.append((weight, cost_fn))
-
- cost_function_cache[topic] = cost_fns
- return cost_fns
-
-
-def weigh_hosts(request_spec, host_list):
- """Returns the best host as a WeightedHost."""
- cost_fns = get_cost_fns()
- return weighted_sum(host_list, cost_fns)
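To make the new scoring path concrete, a small worked example of weighted_sum() with the two stock functions above (host names and RAM figures are made up; the stub stands in for the scheduler's HostInfo):

    class StubHostInfo(object):
        # Only the attribute the stock cost function reads.
        def __init__(self, free_ram_mb):
            self.free_ram_mb = free_ram_mb

    fns = [(1.0, noop_cost_fn), (1.0, compute_fill_first_cost_fn)]
    hosts = [('host1', StubHostInfo(2048)), ('host2', StubHostInfo(512))]
    # host1 scores 1.0*1 + 1.0*2048 = 2049; host2 scores 1.0*1 + 512 = 513.
    # The lowest score wins, so the fuller host2 is chosen: fill-first.
    best = weighted_sum(fns, hosts, options={})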
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index f5a268adb..0f973341e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -66,7 +66,7 @@ class SchedulerManager(manager.Manager):
return self.zone_manager.get_zone_list()
def get_zone_capabilities(self, context=None):
- """Get the normalized set of capabilites for this zone."""
+ """Get the normalized set of capabilities for this zone."""
return self.zone_manager.get_zone_capabilities(context)
def update_service_capabilities(self, context=None, service_name=None,
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
new file mode 100644
index 000000000..61966dc50
--- /dev/null
+++ b/nova/scheduler/scheduler_options.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SchedulerOptions monitors a local .json file for changes and loads
+it if needed. This file is converted to a data structure and passed
+into the filtering and weighing functions which can use it for
+dynamic configuration.
+"""
+
+import datetime
+import json
+import os
+
+from nova import flags
+from nova import log as logging
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('scheduler_json_config_location',
+ '',
+ 'Absolute path to scheduler configuration JSON file.')
+
+LOG = logging.getLogger('nova.scheduler.scheduler_options')
+
+
+class SchedulerOptions(object):
+ """
+ SchedulerOptions monitors a local .json file for changes and loads it
+ if needed. This file is converted to a data structure and passed into
+ the filtering and weighing functions which can use it for dynamic
+ configuration.
+ """
+
+ def __init__(self):
+ super(SchedulerOptions, self).__init__()
+ self.data = {}
+ self.last_modified = None
+ self.last_checked = None
+
+ def _get_file_handle(self, filename):
+ """Get file handle. Broken out for testing."""
+ return open(filename)
+
+ def _get_file_timestamp(self, filename):
+ """Get the last modified datetime. Broken out for testing."""
+ try:
+ return os.path.getmtime(filename)
+ except os.error, e:
+ LOG.exception(_("Could not stat scheduler options file "
+ "%(filename)s: '%(e)s'", locals()))
+ raise
+
+ def _load_file(self, handle):
+ """Decode the JSON file. Broken out for testing."""
+ try:
+ return json.load(handle)
+ except ValueError, e:
+ LOG.exception(_("Could not decode scheduler options: "
+ "'%(e)s'") % locals())
+ return {}
+
+ def _get_time_now(self):
+ """Get current UTC. Broken out for testing."""
+ return datetime.datetime.utcnow()
+
+ def get_configuration(self, filename=None):
+ """Check the json file for changes and load it if needed."""
+ if not filename:
+ filename = FLAGS.scheduler_json_config_location
+ if not filename:
+ return self.data
+ if self.last_checked:
+ now = self._get_time_now()
+ if now - self.last_checked < datetime.timedelta(minutes=5):
+ return self.data
+
+ last_modified = self._get_file_timestamp(filename)
+ if not last_modified or not self.last_modified or \
+ last_modified > self.last_modified:
+ self.data = self._load_file(self._get_file_handle(filename))
+ self.last_modified = last_modified
+ if not self.data:
+ self.data = {}
+
+ return self.data
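A usage sketch for the new class: point the flag at a JSON file and the resulting dict flows into every filter_hosts() call and cost function via the scheduler (the path and key below are illustrative):

    # /etc/nova/scheduler_options.json (hypothetical contents):
    #     {"max_hosts": 10}
    #
    # nova.conf:
    #     --scheduler_json_config_location=/etc/nova/scheduler_options.json

    options = SchedulerOptions()
    config = options.get_configuration()  # re-read at most every 5 minutes
    limit = config.get('max_hosts')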
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index cce1509b8..8f993d9df 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -23,7 +23,7 @@ Simple Scheduler
from nova import db
from nova import flags
-from nova import utils
+from nova import exception
from nova.scheduler import driver
from nova.scheduler import chance
@@ -41,28 +41,33 @@ class SimpleScheduler(chance.ChanceScheduler):
def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
+ elevated = context.elevated()
availability_zone = instance_opts.get('availability_zone')
- if availability_zone and context.is_admin and \
- (':' in availability_zone):
- zone, host = availability_zone.split(':', 1)
- service = db.service_get_by_args(context.elevated(), host,
- 'nova-compute')
+ zone, host = None, None
+ if availability_zone:
+ zone, _x, host = availability_zone.partition(':')
+
+ if host and context.is_admin:
+ service = db.service_get_by_args(elevated, host, 'nova-compute')
if not self.service_is_up(service):
- raise driver.WillNotSchedule(_("Host %s is not alive") % host)
+ raise exception.WillNotSchedule(host=host)
return host
- results = db.service_get_all_compute_sorted(context)
+ results = db.service_get_all_compute_sorted(elevated)
+ if zone:
+ results = [(service, cores) for (service, cores) in results
+ if service['availability_zone'] == zone]
for result in results:
(service, instance_cores) = result
if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
- raise driver.NoValidHost(_("All hosts have too many cores"))
+ msg = _("All hosts have too many cores")
+ raise exception.NoValidHost(reason=msg)
if self.service_is_up(service):
return service['host']
- raise driver.NoValidHost(_("Scheduler was unable to locate a host"
- " for this request. Is the appropriate"
- " service running?"))
+ msg = _("Is the appropriate service running?")
+ raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *_args, **_kwargs):
num_instances = request_spec.get('num_instances', 1)
@@ -82,48 +87,55 @@ class SimpleScheduler(chance.ChanceScheduler):
host = self._schedule_instance(context, instance_ref,
*_args, **_kwargs)
driver.cast_to_compute_host(context, host, 'start_instance',
- instance_id=intance_id, **_kwargs)
+ instance_id=instance_id, **_kwargs)
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
+ elevated = context.elevated()
+
volume_ref = db.volume_get(context, volume_id)
- if (volume_ref['availability_zone']
- and ':' in volume_ref['availability_zone']
- and context.is_admin):
- zone, _x, host = volume_ref['availability_zone'].partition(':')
- service = db.service_get_by_args(context.elevated(), host,
- 'nova-volume')
+ availability_zone = volume_ref.get('availability_zone')
+
+ zone, host = None, None
+ if availability_zone:
+ zone, _x, host = availability_zone.partition(':')
+ if host and context.is_admin:
+ service = db.service_get_by_args(elevated, host, 'nova-volume')
if not self.service_is_up(service):
- raise driver.WillNotSchedule(_("Host %s not available") % host)
+ raise exception.WillNotSchedule(host=host)
driver.cast_to_volume_host(context, host, 'create_volume',
volume_id=volume_id, **_kwargs)
return None
- results = db.service_get_all_volume_sorted(context)
+
+ results = db.service_get_all_volume_sorted(elevated)
+ if zone:
+ results = [(service, gigs) for (service, gigs) in results
+ if service['availability_zone'] == zone]
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
- raise driver.NoValidHost(_("All hosts have too many "
- "gigabytes"))
+ msg = _("All hosts have too many gigabytes")
+ raise exception.NoValidHost(reason=msg)
if self.service_is_up(service):
driver.cast_to_volume_host(context, service['host'],
'create_volume', volume_id=volume_id, **_kwargs)
return None
- raise driver.NoValidHost(_("Scheduler was unable to locate a host"
- " for this request. Is the appropriate"
- " service running?"))
+ msg = _("Is the appropriate service running?")
+ raise exception.NoValidHost(reason=msg)
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
+ elevated = context.elevated()
- results = db.service_get_all_network_sorted(context)
+ results = db.service_get_all_network_sorted(elevated)
for result in results:
(service, instance_count) = result
if instance_count >= FLAGS.max_networks:
- raise driver.NoValidHost(_("All hosts have too many networks"))
+ msg = _("All hosts have too many networks")
+ raise exception.NoValidHost(reason=msg)
if self.service_is_up(service):
driver.cast_to_network_host(context, service['host'],
'set_network_host', **_kwargs)
return None
- raise driver.NoValidHost(_("Scheduler was unable to locate a host"
- " for this request. Is the appropriate"
- " service running?"))
+ msg = _("Is the appropriate service running?")
+ raise exception.NoValidHost(reason=msg)
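The partition()-based parsing above accepts both 'zone' and 'zone:host' forms without the explicit ':' check the old split-based code needed; a quick sketch of the behavior:

    zone, _x, host = 'nova:compute1'.partition(':')
    # -> zone='nova', host='compute1': an admin can force a specific host.
    zone, _x, host = 'nova'.partition(':')
    # -> zone='nova', host='': candidates are merely filtered by zone.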
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index da4475004..7c1670954 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -25,6 +25,7 @@ from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
+from nova import exception
from nova.scheduler import driver
from nova.scheduler import simple
from nova.vsa.api import VsaState
@@ -173,7 +174,7 @@ class VsaScheduler(simple.SimpleScheduler):
selected_hosts,
unique)
if host is None:
- raise driver.WillNotSchedule(_("No available hosts"))
+ raise exception.NoValidHost(reason=_(""))
return (host, qos_cap)
@@ -216,7 +217,7 @@ class VsaScheduler(simple.SimpleScheduler):
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
if not self.service_is_up(service):
- raise driver.WillNotSchedule(_("Host %s not available") % host)
+ raise exception.WillNotSchedule(host=host)
return host
else:
diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py
deleted file mode 100644
index c369477f8..000000000
--- a/nova/scheduler/zone.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Openstack, LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Availability Zone Scheduler implementation
-"""
-
-import random
-
-from nova.scheduler import driver
-from nova import db
-
-
-class ZoneScheduler(driver.Scheduler):
- """Implements Scheduler as a random node selector."""
-
- def hosts_up_with_zone(self, context, topic, zone):
- """Return the list of hosts that have a running service
- for topic and availability zone (if defined).
- """
-
- if not zone:
- return self.hosts_up(context, topic)
-
- services = db.service_get_all_by_topic(context, topic)
- return [service.host
- for service in services
- if self.service_is_up(service)
- and service.availability_zone == zone]
-
- def _schedule(self, context, topic, request_spec, **kwargs):
- """Picks a host that is up at random in selected
- availability zone (if defined).
- """
-
- zone = kwargs.get('availability_zone')
- if not zone and request_spec:
- zone = request_spec['instance_properties'].get(
- 'availability_zone')
- hosts = self.hosts_up_with_zone(context.elevated(), topic, zone)
- if not hosts:
- raise driver.NoValidHost(_("Scheduler was unable to locate a host"
- " for this request. Is the appropriate"
- " service running?"))
- return hosts[int(random.random() * len(hosts))]
-
- def schedule(self, context, topic, method, *_args, **kwargs):
- host = self._schedule(context, topic, None, **kwargs)
- driver.cast_to_host(context, topic, host, method, **kwargs)
-
- def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
- """Builds and starts instances on selected hosts"""
- num_instances = request_spec.get('num_instances', 1)
- instances = []
- for num in xrange(num_instances):
- host = self._schedule(context, 'compute', request_spec, **kwargs)
- instance = self.create_instance_db_entry(context, request_spec)
- driver.cast_to_compute_host(context, host,
- 'run_instance', instance_id=instance['id'], **kwargs)
- instances.append(driver.encode_instance(instance))
- return instances
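
Incidentally, the random pick the deleted scheduler used, hosts[int(random.random() * len(hosts))], is just an inlined random.choice; the equivalent, clearer form:

    import random

    def pick_host(hosts):
        # Raises IndexError on an empty list; the deleted code guarded
        # that case by raising NoValidHost first.
        return random.choice(hosts)
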
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 3ccd5015e..0410cfb1d 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -22,9 +22,8 @@ import thread
import traceback
import UserDict
-from novaclient import v1_1 as novaclient
-
from eventlet import greenpool
+from novaclient import v1_1 as novaclient
from nova import db
from nova import flags
@@ -33,9 +32,13 @@ from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_integer('zone_db_check_interval', 60,
- 'Seconds between getting fresh zone info from db.')
+ 'Seconds between getting fresh zone info from db.')
flags.DEFINE_integer('zone_failures_to_offline', 3,
- 'Number of consecutive errors before marking zone offline')
+ 'Number of consecutive errors before marking zone offline')
+flags.DEFINE_integer('reserved_host_disk_mb', 0,
+ 'Amount of disk in MB to reserve for host/dom0')
+flags.DEFINE_integer('reserved_host_memory_mb', 512,
+ 'Amount of memory in MB to reserve for host/dom0')
class ZoneState(object):
@@ -228,17 +231,26 @@ class ZoneManager(object):
for compute in compute_nodes:
all_disk = compute['local_gb']
all_ram = compute['memory_mb']
- host = compute['service']['host']
+ service = compute['service']
+ if not service:
+ logging.warn(_("No service for compute ID %s") % compute['id'])
+ continue
+ host = service['host']
caps = self.service_states.get(host, None)
- host_info_map[host] = HostInfo(host, caps=caps,
- free_disk_gb=all_disk,
- free_ram_mb=all_ram)
+ host_info = HostInfo(host, caps=caps,
+ free_disk_gb=all_disk, free_ram_mb=all_ram)
+ # Reserve resources for host/dom0
+ host_info.consume_resources(FLAGS.reserved_host_disk_mb * 1024,
+ FLAGS.reserved_host_memory_mb)
+ host_info_map[host] = host_info
# "Consume" resources from the host the instance resides on.
instances = self._instance_get_all(context)
for instance in instances:
host = instance['host']
+ if not host:
+ continue
host_info = host_info_map.get(host, None)
if not host_info:
continue
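
The two new reserved_host_* flags feed the consume_resources() call above: each HostInfo is debited a fixed host/dom0 slice before any instance usage is accounted, so the scheduler never treats that headroom as free capacity. The idea, reduced to arithmetic (units and names here are illustrative, not the exact consume_resources contract):

    RESERVED_DISK_MB = 0     # FLAGS.reserved_host_disk_mb
    RESERVED_RAM_MB = 512    # FLAGS.reserved_host_memory_mb

    def usable_capacity(all_disk_gb, all_ram_mb):
        # Deduct the reserved host/dom0 slice from what the node reports.
        free_disk_gb = all_disk_gb - RESERVED_DISK_MB / 1024.0
        free_ram_mb = all_ram_mb - RESERVED_RAM_MB
        return free_disk_gb, free_ram_mb
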
diff --git a/nova/test.py b/nova/test.py
index be259ce27..abd1294d4 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -69,7 +69,7 @@ class skip_test(object):
class skip_if(object):
- """Decorator that skips a test if contition is true."""
+ """Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
@@ -152,7 +152,7 @@ class TestCase(unittest.TestCase):
if FLAGS.image_service == 'nova.image.fake.FakeImageService':
nova.image.fake.FakeImageService_reset()
- # Reset any overriden flags
+ # Reset any overridden flags
self.reset_flags()
# Stop any timers
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 720d5b0e6..1109dfb70 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -63,6 +63,7 @@ def setup():
num_networks=FLAGS.num_networks,
network_size=FLAGS.network_size,
cidr_v6=FLAGS.fixed_range_v6,
+ gateway=FLAGS.gateway,
gateway_v6=FLAGS.gateway_v6,
bridge=FLAGS.flat_network_bridge,
bridge_interface=bridge_interface,
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index c0f3d44d7..e70f7b832 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1013,8 +1013,6 @@ class CloudTestCase(test.TestCase):
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
- self.stubs.UnsetAll()
-
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
@@ -1216,6 +1214,9 @@ class CloudTestCase(test.TestCase):
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ # NOTE(comstud): Make 'cast' behave like a 'call' which will
+ # ensure that operations complete
+ self.stubs.Set(rpc, 'cast', rpc.call)
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
@@ -1368,8 +1369,62 @@ class CloudTestCase(test.TestCase):
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertTrue(result)
+ def test_start_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertTrue(result)
self._restart_compute_service()
+ def test_stop_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertTrue(result)
+ self._restart_compute_service()
+
+ def test_terminate_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started. It is just ignored.
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertTrue(result)
+ self._restart_compute_service()
+
+ def test_reboot_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started. It is just ignored.
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ result = self.cloud.reboot_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
def _volume_create(self, volume_id=None):
kwargs = {'status': 'available',
'host': self.volume.host,
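
The rpc stub added above is what makes the new start/stop/terminate tests deterministic: a cast is fire-and-forget, so without it terminate_instances could race the still-in-flight operations. Swapping in the synchronous call makes each RPC finish before the next assertion runs. A toy model of the substitution (FakeRPC is hypothetical; the test applies the same swap to the real nova.rpc module):

    class FakeRPC(object):
        def call(self, context, topic, msg):
            # Synchronous: dispatch now and return the result.
            return '%s handled %s' % (topic, msg['method'])

        def cast(self, context, topic, msg):
            # Asynchronous: normally just enqueues and returns None.
            pass

    rpc = FakeRPC()
    rpc.cast = rpc.call  # what stubs.Set(rpc, 'cast', rpc.call) effects
    result = rpc.cast(None, 'compute', {'method': 'stop_instance'})
    assert result == 'compute handled stop_instance'
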
diff --git a/nova/tests/api/ec2/test_faults.py b/nova/tests/api/ec2/test_faults.py
new file mode 100644
index 000000000..be2b5ffe2
--- /dev/null
+++ b/nova/tests/api/ec2/test_faults.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova import test
+from nova.api.ec2 import faults
+
+
+class TestFaults(test.TestCase):
+ """Tests covering ec2 Fault class."""
+
+ def test_fault_exception(self):
+ """Ensure the status_int is set correctly on faults"""
+ fault = faults.Fault(webob.exc.HTTPBadRequest(
+ explanation='test'))
+ self.assertTrue(isinstance(fault.wrapped_exc,
+ webob.exc.HTTPBadRequest))
+
+ def test_fault_exception_status_int(self):
+ """Ensure the status_int is set correctly on faults"""
+ fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
+ self.assertEquals(fault.wrapped_exc.status_int, 404)
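
These two tests pin down the whole contract the new module needs: Fault keeps the wrapped webob error reachable, status code included. A sketch of the minimal class that would satisfy them, assuming (as nova's openstack-side faults do) that it subclasses webob's HTTPException and that rendering the ec2-style error body happens elsewhere:

    import webob.exc

    class Fault(webob.exc.HTTPException):
        """Wrap a webob HTTP error; the original keeps its status_int."""
        def __init__(self, exception):
            self.wrapped_exc = exception
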
diff --git a/nova/tests/api/openstack/contrib/test_admin_actions.py b/nova/tests/api/openstack/contrib/test_admin_actions.py
index 8b5697609..84df0f9bc 100644
--- a/nova/tests/api/openstack/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/contrib/test_admin_actions.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import json
import webob
@@ -23,11 +24,32 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
+INSTANCE = {
+ "id": 1,
+ "name": "fake",
+ "display_name": "test_server",
+ "uuid": "abcd",
+ "user_id": 'fake_user_id',
+ "tenant_id": 'fake_tenant_id',
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "security_groups": [{"id": 1, "name": "test"}],
+ "progress": 0,
+ "image_ref": 'http://foo.com/123',
+ "fixed_ips": [],
+ "instance_type": {"flavorid": '124'},
+ }
+
def fake_compute_api(cls, req, id):
return True
+def return_server_by_id(context, id, session=None):
+ INSTANCE['id'] = id
+ return INSTANCE
+
+
class AdminActionsTest(test.TestCase):
_actions = ('pause', 'unpause', 'suspend', 'resume', 'migrate',
@@ -41,6 +63,7 @@ class AdminActionsTest(test.TestCase):
self.flags(allow_admin_api=True)
for _method in self._methods:
self.stubs.Set(compute.API, _method, fake_compute_api)
+ self.stubs.Set(compute.API, 'get', return_server_by_id)
def test_admin_api_enabled(self):
app = fakes.wsgi_app()
diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index fe1a7a94f..f7ee96917 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -138,11 +138,6 @@ class CreateserverextTest(test.TestCase):
compute_api = MockComputeAPI()
self.stubs.Set(nova.compute, 'API',
self._make_stub_method(compute_api))
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.stubs.Set(
- nova.api.openstack.servers.Controller,
- '_get_kernel_ramdisk_from_image',
- self._make_stub_method((image_uuid, image_uuid)))
return compute_api
def _setup_mock_network_api(self):
diff --git a/nova/tests/api/openstack/contrib/test_extendedstatus.py b/nova/tests/api/openstack/contrib/test_extendedstatus.py
new file mode 100644
index 000000000..8dafd9c7d
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_extendedstatus.py
@@ -0,0 +1,109 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import json
+import webob
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import image
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
+ ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
+
+DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
+
+INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
+
+INSTANCE = {
+ "id": 1,
+ "name": "fake",
+ "display_name": "test_server",
+ "uuid": FAKE_UUID,
+ "user_id": 'fake_user_id',
+ "task_state": "kayaking",
+ "vm_state": "slightly crunchy",
+ "power_state": "empowered",
+ "tenant_id": 'fake_tenant_id',
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "security_groups": [{"id": 1, "name": "test"}],
+ "progress": 0,
+ "image_ref": 'http://foo.com/123',
+ "fixed_ips": [],
+ "instance_type": {"flavorid": '124'},
+ }
+
+
+class ExtendedStatusTest(test.TestCase):
+
+ def setUp(self):
+ super(ExtendedStatusTest, self).setUp()
+ self.uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
+ self.url = '/v1.1/openstack/servers/%s' % self.uuid
+ fakes.stub_out_nw_api(self.stubs)
+
+ def test_extended_status_with_admin(self):
+ def fake_compute_get(*args, **kwargs):
+ return INSTANCE
+
+ self.flags(allow_admin_api=True)
+ self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
+ req = webob.Request.blank(self.url)
+ req.headers['Accept'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['OS-EXT-STS:vm_state'],
+ 'slightly crunchy')
+ self.assertEqual(body['server']['OS-EXT-STS:power_state'], 'empowered')
+ self.assertEqual(body['server']['OS-EXT-STS:task_state'], 'kayaking')
+
+ def test_extended_status_no_admin(self):
+ def fake_compute_get(*args, **kwargs):
+ return INSTANCE
+
+ self.flags(allow_admin_api=False)
+ self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
+ req = webob.Request.blank(self.url)
+ req.headers['Accept'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ body = json.loads(res.body)
+ self.assertEqual(body['server'].get('OS-EXT-STS:vm_state'), None)
+ self.assertEqual(body['server'].get('OS-EXT-STS:power_state'), None)
+ self.assertEqual(body['server'].get('OS-EXT-STS:task_state'), None)
+
+ def test_extended_status_no_instance_fails(self):
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound()
+
+ self.flags(allow_admin_api=True)
+ self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
+ req = webob.Request.blank(self.url)
+ req.headers['Accept'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
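
Read together, the three tests fix the extension's contract: with allow_admin_api on, the server body gains three namespaced keys; with it off, they are simply absent while the response is otherwise an unchanged 200; a missing instance is an ordinary 404. Abridged admin-case body, per the assertions above:

    {"server": {"OS-EXT-STS:vm_state": "slightly crunchy",
                "OS-EXT-STS:power_state": "empowered",
                "OS-EXT-STS:task_state": "kayaking",
                ...}}
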
diff --git a/nova/tests/api/openstack/test_flavors_extra_specs.py b/nova/tests/api/openstack/contrib/test_flavors_extra_specs.py
index 766bb2d45..5784743ee 100644
--- a/nova/tests/api/openstack/test_flavors_extra_specs.py
+++ b/nova/tests/api/openstack/contrib/test_flavors_extra_specs.py
@@ -24,6 +24,7 @@ import os.path
from nova import test
from nova.api import openstack
from nova.api.openstack import extensions
+from nova.api.openstack.contrib import flavorextraspecs
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -59,119 +60,112 @@ class FlavorsExtraSpecsTest(test.TestCase):
def setUp(self):
super(FlavorsExtraSpecsTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
+ self.controller = flavorextraspecs.FlavorExtraSpecsController()
def test_index(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
- request = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs')
- res = request.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs')
+ res_dict = self.controller.index(req, 1)
+
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs')
+ res_dict = self.controller.index(req, 1)
+
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key5')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key5')
+ res_dict = self.controller.show(req, 1, 'key5')
+
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key6')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(404, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key6')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key6')
def test_delete(self):
self.stubs.Set(nova.db, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key5')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key5')
+ self.controller.delete(req, 1, 'key5')
def test_create(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs')
- req.method = 'POST'
- req.body = '{"extra_specs": {"key1": "value1"}}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
+ body = {"extra_specs": {"key1": "value1"}}
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs')
+ res_dict = self.controller.create(req, 1, body)
+
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_create_empty_body(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs')
- req.method = 'POST'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, 1, '')
def test_update_item(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key1')
- req.method = 'PUT'
- req.body = '{"key1": "value1"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
- res_dict = json.loads(res.body)
+ body = {"key1": "value1"}
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key1')
+ res_dict = self.controller.update(req, 1, 'key1', body)
+
self.assertEqual('value1', res_dict['key1'])
def test_update_item_empty_body(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key1')
- req.method = 'PUT'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', '')
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/key1')
- req.method = 'PUT'
- req.body = '{"key1": "value1", "key2": "value2"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+ body = {"key1": "value1", "key2": "value2"}
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs' +
+ '/key1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/v1.1/123/flavors/1/os-extra_specs/bad')
- req.method = 'PUT'
- req.body = '{"key1": "value1"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+ body = {"key1": "value1"}
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/flavors/1/os-extra_specs/bad')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'bad', body)
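
This file is the template for the rewrites that follow (floating IPs, quotas, security groups): drop the full WSGI round-trip through fakes.wsgi_app() and call the controller directly with a fakes.HTTPRequest, asserting on the returned dict for success and on the raised webob.exc class for failure. A self-contained sketch of the style, with a hypothetical controller standing in for the real extensions:

    import webob.exc

    from nova.tests.api.openstack import fakes

    class ThingController(object):
        """Hypothetical controller; real ones come from the contrib extensions."""
        def show(self, req, id):
            if int(id) != 1:
                raise webob.exc.HTTPNotFound()
            return {'thing': {'id': 1}}

    controller = ThingController()
    req = fakes.HTTPRequest.blank('/v1.1/123/os-things/1')
    assert controller.show(req, 1) == {'thing': {'id': 1}}  # dict, not JSON body
    try:
        controller.show(req, 2)
    except webob.exc.HTTPNotFound:
        pass  # errors surface as webob exceptions, not encoded 404 responses

One request-parsing and serialization layer less per test, at the cost of no longer exercising routing; the remaining wsgi_app() tests (e.g. the multinic ones below) keep that path covered.
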
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index e0008c3f8..63831f31f 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -27,7 +27,7 @@ from nova.tests.api.openstack import fakes
from nova.tests.api.openstack import test_servers
-from nova.api.openstack.contrib.floating_ips import FloatingIPController
+from nova.api.openstack.contrib import floating_ips
from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
@@ -94,6 +94,11 @@ def fake_instance_get(context, instance_id):
"project_id": '123'}
+class StubExtensionManager(object):
+ def register(self, *args):
+ pass
+
+
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
@@ -127,6 +132,9 @@ class FloatingIpTest(test.TestCase):
self.context = context.get_admin_context()
self._create_floating_ip()
+ self.controller = floating_ips.FloatingIPController()
+ self.manager = floating_ips.Floating_ips(StubExtensionManager())
+
def tearDown(self):
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
@@ -148,10 +156,9 @@ class FloatingIpTest(test.TestCase):
self.assertTrue('floating_ip' in view)
def test_floating_ips_list(self):
- req = webob.Request.blank('/v1.1/123/os-floating-ips')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips')
+ res_dict = self.controller.index(req)
+
response = {'floating_ips': [{'instance_id': 1,
'ip': '10.10.10.10',
'fixed_ip': '10.0.0.1',
@@ -163,10 +170,9 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res_dict, response)
def test_floating_ip_show(self):
- req = webob.Request.blank('/v1.1/123/os-floating-ips/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips/1')
+ res_dict = self.controller.show(req, 1)
+
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
@@ -177,10 +183,9 @@ class FloatingIpTest(test.TestCase):
'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}
self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
- req = webob.Request.blank('/v1.1/123/os-floating-ips/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips/1')
+ res_dict = self.controller.show(req, 1)
+
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['instance_id'], 1)
@@ -191,11 +196,10 @@ class FloatingIpTest(test.TestCase):
raise(rpc.RemoteError('NoMoreFloatingIps', '', ''))
self.stubs.Set(rpc, "call", fake_call)
- req = webob.Request.blank('/v1.1/123/os-floating-ips')
- req.method = 'POST'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req)
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
@@ -208,12 +212,11 @@ class FloatingIpTest(test.TestCase):
fake1)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake2)
- req = webob.Request.blank('/v1.1/123/os-floating-ips')
- req.method = 'POST'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- ip = json.loads(res.body)['floating_ip']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips')
+ res_dict = self.controller.create(req)
+
+ ip = res_dict['floating_ip']
expected = {
"id": 1,
@@ -223,61 +226,45 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
- req = webob.Request.blank('/v1.1/123/os-floating-ips/1')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-floating-ips/1')
+ self.controller.delete(req, 1)
# test floating ip add/remove -> associate/disassociate
def test_floating_ip_associate(self):
body = dict(addFloatingIp=dict(address=self.address))
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
- req.method = "POST"
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/test_inst/action')
+ self.manager._add_floating_ip(body, req, 'test_inst')
def test_floating_ip_disassociate(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
- req.method = "POST"
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/test_inst/action')
+ self.manager._remove_floating_ip(body, req, 'test_inst')
# these are a few bad param tests
def test_bad_address_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
- req.method = "POST"
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 400)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, body, req,
+ 'test_inst')
def test_missing_dict_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp='11.0.0.1')
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
- req.method = "POST"
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 400)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._remove_floating_ip, body, req,
+ 'test_inst')
def test_missing_dict_param_in_add_floating_ip(self):
body = dict(addFloatingIp='11.0.0.1')
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
- req.method = "POST"
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = req.get_response(fakes.wsgi_app())
- self.assertEqual(resp.status_int, 400)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, body, req,
+ 'test_inst')
diff --git a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py
index cecc4af4f..90999a384 100644
--- a/nova/tests/api/openstack/contrib/test_multinic_xs.py
+++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py
@@ -23,20 +23,25 @@ from nova import test
from nova.tests.api.openstack import fakes
+UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)
-def compute_api_add_fixed_ip(self, context, instance_id, network_id):
+def compute_api_add_fixed_ip(self, context, instance, network_id):
global last_add_fixed_ip
- last_add_fixed_ip = (instance_id, network_id)
+ last_add_fixed_ip = (instance['uuid'], network_id)
-def compute_api_remove_fixed_ip(self, context, instance_id, address):
+def compute_api_remove_fixed_ip(self, context, instance, address):
global last_remove_fixed_ip
- last_remove_fixed_ip = (instance_id, address)
+ last_remove_fixed_ip = (instance['uuid'], address)
+
+
+def compute_api_get(self, context, instance_id):
+ return {'id': 1, 'uuid': instance_id}
class FixedIpTest(test.TestCase):
@@ -48,6 +53,7 @@ class FixedIpTest(test.TestCase):
compute_api_add_fixed_ip)
self.stubs.Set(compute.api.API, "remove_fixed_ip",
compute_api_remove_fixed_ip)
+ self.stubs.Set(compute.api.API, 'get', compute_api_get)
self.context = context.get_admin_context()
def test_add_fixed_ip(self):
@@ -55,21 +61,21 @@ class FixedIpTest(test.TestCase):
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict(networkId='test_net'))
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req = webob.Request.blank('/v1.1/123/servers/%s/action' % UUID)
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
- self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net'))
+ self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))
def test_add_fixed_ip_no_network(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict())
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req = webob.Request.blank('/v1.1/123/servers/%s/action' % UUID)
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
@@ -83,21 +89,21 @@ class FixedIpTest(test.TestCase):
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict(address='10.10.10.1'))
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req = webob.Request.blank('/v1.1/123/servers/%s/action' % UUID)
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
- self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1'))
+ self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))
def test_remove_fixed_ip_no_address(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict())
- req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req = webob.Request.blank('/v1.1/123/servers/%s/action' % UUID)
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
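
The driver for these multinic changes is the compute API signature shift visible at the top of the file: add_fixed_ip/remove_fixed_ip now take an instance dict rather than an instance_id, so the extension must resolve the URL's server id first; that is exactly what the new compute.API.get stub feeds. The resolve-then-call shape, as the stubs imply it:

    # Stub standing in for compute.API.get (the real one loads from the DB):
    def compute_api_get(self, context, instance_id):
        return {'id': 1, 'uuid': instance_id}

    # Extension-side flow (hypothetical outline of the caller):
    #     instance = self.compute_api.get(context, server_id)
    #     self.compute_api.add_fixed_ip(context, instance, network_id)
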
diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py
index 7faef08b2..6374dfd93 100644
--- a/nova/tests/api/openstack/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/contrib/test_quotas.py
@@ -79,12 +79,10 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_defaults(self):
uri = '/v1.1/fake_tenant/os-quota-sets/fake_tenant/defaults'
- req = webob.Request.blank(uri)
- req.method = 'GET'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
+ req = fakes.HTTPRequest.blank(uri)
+ res_dict = self.controller.defaults(req, 'fake_tenant')
+
expected = {'quota_set': {
'id': 'fake_tenant',
'instances': 10,
@@ -97,57 +95,40 @@ class QuotaSetsTest(test.TestCase):
'injected_files': 5,
'injected_file_content_bytes': 10240}}
- self.assertEqual(json.loads(res.body), expected)
+ self.assertEqual(res_dict, expected)
def test_quotas_show_as_admin(self):
- req = webob.Request.blank('/v1.1/1234/os-quota-sets/1234')
- req.method = 'GET'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app(
- fake_auth_context=self.admin_context))
+ req = fakes.HTTPRequest.blank('/v1.1/1234/os-quota-sets/1234',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, 1234)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(json.loads(res.body), quota_set('1234'))
+ self.assertEqual(res_dict, quota_set('1234'))
def test_quotas_show_as_unauthorized_user(self):
- req = webob.Request.blank('/v1.1/fake/os-quota-sets/1234')
- req.method = 'GET'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app(
- fake_auth_context=self.user_context))
-
- self.assertEqual(res.status_int, 403)
+ req = fakes.HTTPRequest.blank('/v1.1/1234/os-quota-sets/1234')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
+ req, 1234)
def test_quotas_update_as_admin(self):
- updated_quota_set = {'quota_set': {'instances': 50,
- 'cores': 50, 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
- 'injected_file_content_bytes': 10240}}
-
- req = webob.Request.blank('/v1.1/1234/os-quota-sets/update_me')
- req.method = 'PUT'
- req.body = json.dumps(updated_quota_set)
- req.headers['Content-Type'] = 'application/json'
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
- res = req.get_response(fakes.wsgi_app(
- fake_auth_context=self.admin_context))
+ req = fakes.HTTPRequest.blank('/v1.1/1234/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body)
- self.assertEqual(json.loads(res.body), updated_quota_set)
+ self.assertEqual(res_dict, body)
def test_quotas_update_as_user(self):
- updated_quota_set = {'quota_set': {'instances': 50,
- 'cores': 50, 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
- 'metadata_items': 128, 'injected_files': 5,
- 'injected_file_content_bytes': 10240}}
-
- req = webob.Request.blank('/v1.1/1234/os-quota-sets/update_me')
- req.method = 'PUT'
- req.body = json.dumps(updated_quota_set)
- req.headers['Content-Type'] = 'application/json'
-
- res = req.get_response(fakes.wsgi_app(
- fake_auth_context=self.user_context))
-
- self.assertEqual(res.status_int, 403)
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ req = fakes.HTTPRequest.blank('/v1.1/1234/os-quota-sets/update_me')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
+ req, 'update_me', body)
diff --git a/nova/tests/api/openstack/contrib/test_rescue.py b/nova/tests/api/openstack/contrib/test_rescue.py
index 403bcfd4c..f5b69865c 100644
--- a/nova/tests/api/openstack/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/contrib/test_rescue.py
@@ -23,17 +23,23 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-def rescue(self, context, instance_id, rescue_password=None):
+def rescue(self, context, instance, rescue_password=None):
pass
-def unrescue(self, context, instance_id):
+def unrescue(self, context, instance):
pass
class RescueTest(test.TestCase):
def setUp(self):
super(RescueTest, self).setUp()
+
+ def fake_compute_get(*args, **kwargs):
+ uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
+ return {'id': 1, 'uuid': uuid}
+
+ self.stubs.Set(compute.api.API, "get", fake_compute_get)
self.stubs.Set(compute.api.API, "rescue", rescue)
self.stubs.Set(compute.api.API, "unrescue", unrescue)
diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py
index 65e60df7f..f55ce4a55 100644
--- a/nova/tests/api/openstack/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/contrib/test_security_groups.py
@@ -30,31 +30,44 @@ from nova.tests.api.openstack import fakes
FAKE_UUID = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
-def _get_create_request_json(body_dict):
- req = webob.Request.blank('/v1.1/123/os-security-groups')
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body_dict)
- return req
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
-def _create_security_group_json(security_group):
- body_dict = _create_security_group_request_dict(security_group)
- request = _get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app())
- return response
+def security_group_template(**kwargs):
+ sg = kwargs.copy()
+ sg.setdefault('tenant_id', '123')
+ sg.setdefault('name', 'test')
+ sg.setdefault('description', 'test-description')
+ return sg
-def _create_security_group_request_dict(security_group):
- sg = {}
- if security_group is not None:
- name = security_group.get('name', None)
- description = security_group.get('description', None)
- if name:
- sg['name'] = security_group['name']
- if description:
- sg['description'] = security_group['description']
- return {'security_group': sg}
+def security_group_db(security_group, id=None):
+ attrs = security_group.copy()
+ if 'tenant_id' in attrs:
+ attrs['project_id'] = attrs.pop('tenant_id')
+ if id is not None:
+ attrs['id'] = id
+ attrs.setdefault('rules', [])
+ attrs.setdefault('instances', [])
+ return AttrDict(attrs)
+
+
+def security_group_rule_template(**kwargs):
+ rule = kwargs.copy()
+ rule.setdefault('ip_protocol', 'tcp')
+ rule.setdefault('from_port', 22)
+ rule.setdefault('to_port', 22)
+ rule.setdefault('parent_group_id', 2)
+ return rule
+
+
+def security_group_rule_db(rule, id=None):
+ attrs = rule.copy()
+ if 'ip_protocol' in attrs:
+ attrs['protocol'] = attrs.pop('ip_protocol')
+ return AttrDict(attrs)
def return_server(context, server_id):
@@ -72,13 +85,11 @@ def return_server_by_uuid(context, server_uuid):
def return_non_running_server(context, server_id):
- return {'id': server_id, 'state': 0x02,
- 'host': "localhost"}
+ return {'id': server_id, 'state': 0x02, 'host': "localhost"}
-def return_security_group(context, project_id, group_name):
- return {'id': 1, 'name': group_name, "instances": [
- {'id': 1}]}
+def return_security_group_by_name(context, project_id, group_name):
+ return {'id': 1, 'name': group_name, "instances": [{'id': 1}]}
def return_security_group_without_instances(context, project_id, group_name):
@@ -89,328 +100,251 @@ def return_server_nonexistent(context, server_id):
raise exception.InstanceNotFound(instance_id=server_id)
+class StubExtensionManager(object):
+ def register(self, *args):
+ pass
+
+
class TestSecurityGroups(test.TestCase):
def setUp(self):
super(TestSecurityGroups, self).setUp()
+ self.controller = security_groups.SecurityGroupController()
+ self.manager = security_groups.Security_groups(StubExtensionManager())
+
def tearDown(self):
super(TestSecurityGroups, self).tearDown()
- def _create_security_group_request_dict(self, security_group):
- sg = {}
- if security_group is not None:
- name = security_group.get('name', None)
- description = security_group.get('description', None)
- if name:
- sg['name'] = security_group['name']
- if description:
- sg['description'] = security_group['description']
- return {'security_group': sg}
-
- def _format_create_xml_request_body(self, body_dict):
- sg = body_dict['security_group']
- body_parts = []
- body_parts.extend([
- '<?xml version="1.0" encoding="UTF-8"?>',
- '<security_group xmlns="http://docs.openstack.org/ext/'
- 'securitygroups/api/v1.1"',
- ' name="%s">' % (sg['name'])])
- if 'description' in sg:
- body_parts.append('<description>%s</description>'
- % sg['description'])
- body_parts.append('</security_group>')
- return ''.join(body_parts)
-
- def _get_create_request_xml(self, body_dict):
- req = webob.Request.blank('/v1.1/123/os-security-groups')
- req.headers['Content-Type'] = 'application/xml'
- req.content_type = 'application/xml'
- req.accept = 'application/xml'
- req.method = 'POST'
- req.body = self._format_create_xml_request_body(body_dict)
- return req
-
- def _create_security_group_xml(self, security_group):
- body_dict = self._create_security_group_request_dict(security_group)
- request = self._get_create_request_xml(body_dict)
- response = request.get_response(fakes.wsgi_app())
- return response
-
- def _delete_security_group(self, id):
- request = webob.Request.blank('/v1.1/123/os-security-groups/%s'
- % id)
- request.method = 'DELETE'
- response = request.get_response(fakes.wsgi_app())
- return response
-
- def test_create_security_group_json(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- res_dict = json.loads(response.body)
- self.assertEqual(res_dict['security_group']['name'], "test")
+ def test_create_security_group(self):
+ sg = security_group_template()
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ res_dict = self.controller.create(req, {'security_group': sg})
+ self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
- "group-description")
- self.assertEquals(response.status_int, 200)
-
- def test_create_security_group_xml(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = \
- self._create_security_group_xml(security_group)
-
- self.assertEquals(response.status_int, 200)
- dom = minidom.parseString(response.body)
- sg = dom.childNodes[0]
- self.assertEquals(sg.nodeName, 'security_group')
- self.assertEqual(security_group['name'], sg.getAttribute('name'))
-
- def test_create_security_group_with_no_name_json(self):
- security_group = {}
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_no_description_json(self):
- security_group = {}
- security_group['name'] = "test"
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_blank_name_json(self):
- security_group = {}
- security_group['name'] = ""
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_whitespace_name_json(self):
- security_group = {}
- security_group['name'] = " "
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_blank_description_json(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = ""
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_whitespace_description_json(self):
- security_group = {}
- security_group['name'] = "name"
- security_group['description'] = " "
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_duplicate_name_json(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
-
- self.assertEquals(response.status_int, 200)
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_with_no_body_json(self):
- request = _get_create_request_json(body_dict=None)
- response = request.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 422)
+ 'test-description')
+
+ def test_create_security_group_with_no_name(self):
+ sg = security_group_template()
+ del sg['name']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, sg)
+
+ def test_create_security_group_with_no_description(self):
+ sg = security_group_template()
+ del sg['description']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_blank_name(self):
+ sg = security_group_template(name='')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_whitespace_name(self):
+ sg = security_group_template(name=' ')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_blank_description(self):
+ sg = security_group_template(description='')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_whitespace_description(self):
+ sg = security_group_template(description=' ')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_duplicate_name(self):
+ sg = security_group_template()
+
+ # FIXME: Stub out _get instead of creating twice
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.controller.create(req, {'security_group': sg})
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, None)
def test_create_security_group_with_no_security_group(self):
- body_dict = {}
- body_dict['no-securityGroup'] = None
- request = _get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 422)
-
- def test_create_security_group_above_255_characters_name_json(self):
- security_group = {}
- security_group['name'] = ("1234567890123456"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890")
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
-
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_above_255_characters_description_json(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = ("1234567890123456"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890")
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_non_string_name_json(self):
- security_group = {}
- security_group['name'] = 12
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
-
- def test_create_security_group_non_string_description_json(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = 12
- response = _create_security_group_json(security_group)
- self.assertEquals(response.status_int, 400)
+ body = {'no-securityGroup': None}
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, body)
+
+ def test_create_security_group_above_255_characters_name(self):
+ sg = security_group_template(name='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_above_255_characters_description(self):
+ sg = security_group_template(description='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_non_string_name(self):
+ sg = security_group_template(name=12)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_create_security_group_non_string_description(self):
+ sg = security_group_template(description=12)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
def test_get_security_group_list(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
-
- req = webob.Request.blank('/v1.1/123/os-security-groups')
- req.headers['Content-Type'] = 'application/json'
- req.method = 'GET'
- response = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(response.body)
-
- expected = {'security_groups': [
- {'id': 1,
- 'name':"default",
- 'tenant_id': "123",
- "description":"default",
- "rules": []
- },
- ]
- }
- expected['security_groups'].append(
- {
- 'id': 2,
- 'name': "test",
- 'tenant_id': "123",
- "description": "group-description",
- "rules": []
- }
- )
- self.assertEquals(response.status_int, 200)
+ groups = []
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ groups.append(sg)
+ expected = {'security_groups': groups}
+
+ def return_security_groups(context, project_id):
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_project',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups')
+ res_dict = self.controller.index(req)
+
self.assertEquals(res_dict, expected)
def test_get_security_group_by_id(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
-
- res_dict = json.loads(response.body)
- req = webob.Request.blank('/v1.1/123/os-security-groups/%s' %
- res_dict['security_group']['id'])
- req.headers['Content-Type'] = 'application/json'
- req.method = 'GET'
- response = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(response.body)
+ sg = security_group_template(id=2, rules=[])
- expected = {
- 'security_group': {
- 'id': 2,
- 'name': "test",
- 'tenant_id': "123",
- 'description': "group-description",
- 'rules': []
- }
- }
+ def return_security_group(context, group_id):
+ self.assertEquals(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/2')
+ res_dict = self.controller.show(req, '2')
+
+ expected = {'security_group': sg}
self.assertEquals(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
- req = webob.Request.blank('/v1.1/123/os-security-groups/invalid')
- req.headers['Content-Type'] = 'application/json'
- req.method = 'GET'
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
- req = webob.Request.blank('/v1.1/123/os-security-groups/111111111')
- req.headers['Content-Type'] = 'application/json'
- req.method = 'GET'
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 404)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/111111111')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, '111111111')
def test_delete_security_group_by_id(self):
- security_group = {}
- security_group['name'] = "test"
- security_group['description'] = "group-description"
- response = _create_security_group_json(security_group)
- security_group = json.loads(response.body)['security_group']
- response = self._delete_security_group(security_group['id'])
- self.assertEquals(response.status_int, 202)
+ sg = security_group_template(id=1, rules=[])
+
+ self.called = False
+
+ def security_group_destroy(context, id):
+ self.called = True
+
+ def return_security_group(context, group_id):
+ self.assertEquals(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_destroy',
+ security_group_destroy)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/1')
+ self.controller.delete(req, '1')
- response = self._delete_security_group(security_group['id'])
- self.assertEquals(response.status_int, 404)
+ self.assertTrue(self.called)
def test_delete_security_group_by_invalid_id(self):
- response = self._delete_security_group('invalid')
- self.assertEquals(response.status_int, 400)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
- response = self._delete_security_group(11111111)
- self.assertEquals(response.status_int, 404)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-groups/11111111')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, '11111111')
def test_associate_by_non_existing_security_group_name(self):
body = dict(addSecurityGroup=dict(name='non-existing'))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 404)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, body, req, '1')
+
+ def test_associate_by_invalid_server_id(self):
+ body = dict(addSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, body, req, 'invalid')
def test_associate_without_body(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate_no_security_group_name(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate_security_group_name_with_whitespaces(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 404)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
@@ -419,26 +353,22 @@ class TestSecurityGroups(test.TestCase):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
+ return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, body, req, '1')
def test_associate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
@@ -446,104 +376,79 @@ class TestSecurityGroups(test.TestCase):
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 202)
-
- def test_associate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_without_instances)
- self.mox.ReplayAll()
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/xml'
- req.method = 'POST'
- req.body = """<addSecurityGroup>
- <name>test</name>
- </addSecurityGroup>"""
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.manager._addSecurityGroup(body, req, '1')
def test_disassociate_by_non_existing_security_group_name(self):
body = dict(removeSecurityGroup=dict(name='non-existing'))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 404)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, body, req, '1')
+
+ def test_disassociate_by_invalid_server_id(self):
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, body, req,
+ 'invalid')
def test_disassociate_without_body(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate_no_security_group_name(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict())
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate_security_group_name_with_whitespaces(self):
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- body = dict(removeSecurityGroup=dict(name="test"))
self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 404)
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
+ return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
@@ -552,12 +457,10 @@ class TestSecurityGroups(test.TestCase):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 400)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, body, req, '1')
def test_disassociate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
@@ -568,377 +471,242 @@ class TestSecurityGroups(test.TestCase):
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
+ return_security_group_by_name)
self.mox.ReplayAll()
body = dict(removeSecurityGroup=dict(name="test"))
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = json.dumps(body)
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 202)
-
- def test_disassociate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group)
- self.mox.ReplayAll()
- req = webob.Request.blank('/v1.1/123/servers/%s/action' % FAKE_UUID)
- req.headers['Content-Type'] = 'application/xml'
- req.method = 'POST'
- req.body = """<removeSecurityGroup>
- <name>test</name>
- </removeSecurityGroup>"""
- response = req.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 202)
+ req = fakes.HTTPRequest.blank('/v1.1/123/servers/1/action')
+ self.manager._removeSecurityGroup(body, req, '1')
class TestSecurityGroupRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupRules, self).setUp()
- security_group = {}
- security_group['name'] = "authorize-revoke"
- security_group['description'] = ("Security group created for "
- " authorize-revoke testing")
- response = _create_security_group_json(security_group)
- security_group = json.loads(response.body)
- self.parent_security_group = security_group['security_group']
-
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "parent_group_id": self.parent_security_group['id'],
- "cidr": "10.0.0.0/24"
- }
- }
- res = self._create_security_group_rule_json(rules)
- self.assertEquals(res.status_int, 200)
- self.security_group_rule = json.loads(res.body)['security_group_rule']
+
+ controller = security_groups.SecurityGroupController()
+
+ sg1 = security_group_template(id=1)
+ sg2 = security_group_template(id=2,
+ name='authorize_revoke',
+ description='authorize-revoke testing')
+ db1 = security_group_db(sg1)
+ db2 = security_group_db(sg2)
+
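+        # Route security_group_get lookups to the matching canned group;
+        # any other id behaves like a missing row.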
+ def return_security_group(context, group_id):
+ if group_id == db1['id']:
+ return db1
+ if group_id == db2['id']:
+ return db2
+ raise exception.NotFound()
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ self.parent_security_group = db2
+
+ self.controller = security_groups.SecurityGroupRulesController()
def tearDown(self):
super(TestSecurityGroupRules, self).tearDown()
- def _create_security_group_rule_json(self, rules):
- request = webob.Request.blank('/v1.1/123/os-security-group-rules')
- request.headers['Content-Type'] = 'application/json'
- request.method = 'POST'
- request.body = json.dumps(rules)
- response = request.get_response(fakes.wsgi_app())
- return response
-
- def _delete_security_group_rule(self, id):
- request = webob.Request.blank('/v1.1/123/os-security-group-rules/%s'
- % id)
- request.method = 'DELETE'
- response = request.get_response(fakes.wsgi_app())
- return response
-
- def test_create_by_cidr_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "parent_group_id": 2,
- "cidr": "10.2.3.124/24"
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- security_group_rule = json.loads(response.body)['security_group_rule']
- self.assertEquals(response.status_int, 200)
+ def test_create_by_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.3.124/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'], 2)
self.assertEquals(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
- def test_create_by_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "1",
- "parent_group_id": "%s"
- % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 200)
- security_group_rule = json.loads(response.body)['security_group_rule']
+ def test_create_by_group_id(self):
+ rule = security_group_rule_template(group_id='1')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'], 2)
- def test_create_add_existing_rules_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "cidr": "10.0.0.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_no_body_json(self):
- request = webob.Request.blank('/v1.1/123/os-security-group-rules')
- request.headers['Content-Type'] = 'application/json'
- request.method = 'POST'
- request.body = json.dumps(None)
- response = request.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 422)
-
- def test_create_with_no_security_group_rule_in_body_json(self):
- request = webob.Request.blank('/v1.1/123/os-security-group-rules')
- request.headers['Content-Type'] = 'application/json'
- request.method = 'POST'
- body_dict = {'test': "test"}
- request.body = json.dumps(body_dict)
- response = request.get_response(fakes.wsgi_app())
- self.assertEquals(response.status_int, 422)
-
- def test_create_with_invalid_parent_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "parent_group_id": "invalid"
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_non_existing_parent_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "invalid",
- "parent_group_id": "1111111111111"
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 404)
-
- def test_create_with_invalid_protocol_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "invalid-protocol",
- "from_port": "22",
- "to_port": "22",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_no_protocol_json(self):
- rules = {
- "security_group_rule": {
- "from_port": "22",
- "to_port": "22",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_invalid_from_port_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "666666",
- "to_port": "22",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_invalid_to_port_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "666666",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_non_numerical_from_port_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "invalid",
- "to_port": "22",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_non_numerical_to_port_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "invalid",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_no_to_port_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "cidr": "10.2.2.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_invalid_cidr_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "cidr": "10.2.22222.0/24",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_no_cidr_group_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- security_group_rule = json.loads(response.body)['security_group_rule']
- self.assertEquals(response.status_int, 200)
+ def test_create_add_existing_rules(self):
+ rule = security_group_rule_template(cidr='10.0.0.0/24')
+
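+        # Seed the parent group with an identical rule so create() trips
+        # the duplicate-rule check.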
+ self.parent_security_group['rules'] = [security_group_rule_db(rule)]
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, None)
+
+ def test_create_with_no_security_group_rule_in_body(self):
+ rules = {'test': 'test'}
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, rules)
+
+ def test_create_with_invalid_parent_group_id(self):
+ rule = security_group_rule_template(parent_group_id='invalid')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_existing_parent_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id='1111111111111')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_protocol(self):
+ rule = security_group_rule_template(ip_protocol='invalid-protocol',
+ cidr='10.2.2.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_protocol(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24')
+ del rule['ip_protocol']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_from_port(self):
+ rule = security_group_rule_template(from_port='666666',
+ cidr='10.2.2.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_to_port(self):
+ rule = security_group_rule_template(to_port='666666',
+ cidr='10.2.2.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_from_port(self):
+ rule = security_group_rule_template(from_port='invalid',
+ cidr='10.2.2.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_to_port(self):
+ rule = security_group_rule_template(to_port='invalid',
+ cidr='10.2.2.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_from_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24')
+ del rule['from_port']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_to_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24')
+ del rule['to_port']
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.2222.0/24')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_cidr_group(self):
+ rule = security_group_rule_template()
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
- def test_create_with_invalid_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "invalid",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_empty_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "invalid",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_with_invalid_group_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "222222",
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
-
- def test_create_rule_with_same_group_parent_id_json(self):
- rules = {
- "security_group_rule": {
- "ip_protocol": "tcp",
- "from_port": "22",
- "to_port": "22",
- "group_id": "%s" % self.parent_security_group['id'],
- "parent_group_id": "%s" % self.parent_security_group['id'],
- }
- }
-
- response = self._create_security_group_rule_json(rules)
- self.assertEquals(response.status_int, 400)
+ def test_create_with_invalid_group_id(self):
+ rule = security_group_rule_template(group_id='invalid')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_empty_group_id(self):
+ rule = security_group_rule_template(group_id='')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_nonexist_group_id(self):
+ rule = security_group_rule_template(group_id='222222')
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_rule_with_same_group_parent_id(self):
+ rule = security_group_rule_template(group_id=2)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
def test_delete(self):
- response = self._delete_security_group_rule(
- self.security_group_rule['id'])
- self.assertEquals(response.status_int, 202)
+ rule = security_group_rule_template(id=10)
+
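+        # Pretend rule 10 exists and let its deletion succeed silently.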
+ def security_group_rule_get(context, id):
+ return security_group_rule_db(rule)
+
+ def security_group_rule_destroy(context, id):
+ pass
+
+ self.stubs.Set(nova.db, 'security_group_rule_get',
+ security_group_rule_get)
+ self.stubs.Set(nova.db, 'security_group_rule_destroy',
+ security_group_rule_destroy)
- response = self._delete_security_group_rule(
- self.security_group_rule['id'])
- self.assertEquals(response.status_int, 404)
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules/10')
+ self.controller.delete(req, '10')
def test_delete_invalid_rule_id(self):
- response = self._delete_security_group_rule('invalid')
- self.assertEquals(response.status_int, 400)
+        req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules'
+                                      '/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
def test_delete_non_existing_rule_id(self):
- response = self._delete_security_group_rule(22222222222222)
- self.assertEquals(response.status_int, 404)
+        req = fakes.HTTPRequest.blank('/v1.1/123/os-security-group-rules'
+                                      '/22222222222222')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, '22222222222222')
class TestSecurityGroupRulesXMLDeserializer(unittest.TestCase):
diff --git a/nova/tests/api/openstack/test_volume_types.py b/nova/tests/api/openstack/contrib/test_volume_types.py
index 192e66854..ec1c44854 100644
--- a/nova/tests/api/openstack/test_volume_types.py
+++ b/nova/tests/api/openstack/contrib/test_volume_types.py
@@ -21,6 +21,7 @@ from nova import exception
from nova import context
from nova import test
from nova import log as logging
+from nova.api.openstack.contrib import volumetypes
from nova.volume import volume_types
from nova.tests.api.openstack import fakes
@@ -75,6 +76,7 @@ class VolumeTypesApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
+ self.controller = volumetypes.VolumeTypesController()
def tearDown(self):
self.stubs.UnsetAll()
@@ -83,11 +85,9 @@ class VolumeTypesApiTest(test.TestCase):
def test_volume_types_index(self):
self.stubs.Set(volume_types, 'get_all_types',
return_volume_types_get_all_types)
- req = webob.Request.blank('/v1.1/123/os-volume-types')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
+ res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict))
for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']:
@@ -97,65 +97,60 @@ class VolumeTypesApiTest(test.TestCase):
def test_volume_types_index_no_data(self):
self.stubs.Set(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
- req = webob.Request.blank('/v1.1/123/os-volume-types')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
+ res_dict = self.controller.index(req)
+
self.assertEqual(0, len(res_dict))
def test_volume_types_show(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
- req = webob.Request.blank('/v1.1/123/os-volume-types/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
+ res_dict = self.controller.show(req, 1)
+
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
- req = webob.Request.blank('/v1.1/123/os-volume-types/777')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(404, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, '777')
def test_volume_types_delete(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
- req = webob.Request.blank('/v1.1/123/os-volume-types/1')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
+ self.controller.delete(req, 1)
def test_volume_types_delete_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
- req = webob.Request.blank('/v1.1/123/os-volume-types/777')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(404, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, '777')
def test_create(self):
self.stubs.Set(volume_types, 'create',
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
- req = webob.Request.blank('/v1.1/123/os-volume-types')
- req.method = 'POST'
- req.body = '{"volume_type": {"name": "vol_type_1", '\
- '"extra_specs": {"key1": "value1"}}}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ body = {"volume_type": {"name": "vol_type_1",
+ "extra_specs": {"key1": "value1"}}}
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
+ res_dict = self.controller.create(req, body)
+
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
@@ -164,8 +159,7 @@ class VolumeTypesApiTest(test.TestCase):
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
- req = webob.Request.blank('/v1.1/123/os-volume-types')
- req.method = 'POST'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+
+ req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, '')
diff --git a/nova/tests/api/openstack/test_volume_types_extra_specs.py b/nova/tests/api/openstack/contrib/test_volume_types_extra_specs.py
index e458c5e19..796478838 100644
--- a/nova/tests/api/openstack/test_volume_types_extra_specs.py
+++ b/nova/tests/api/openstack/contrib/test_volume_types_extra_specs.py
@@ -26,6 +26,7 @@ import os.path
from nova import test
from nova.api import openstack
from nova.api.openstack import extensions
+from nova.api.openstack.contrib import volumetypes
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -63,119 +64,106 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
super(VolumeTypesExtraSpecsTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.api_path = '/v1.1/123/os-volume-types/1/extra_specs'
+ self.controller = volumetypes.VolumeTypeExtraSpecsController()
def test_index(self):
self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
- request = webob.Request.blank(self.api_path)
- res = request.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank(self.api_path)
+ res_dict = self.controller.index(req, 1)
+
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path)
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank(self.api_path)
+ res_dict = self.controller.index(req, 1)
+
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key5')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- res_dict = json.loads(res.body)
- self.assertEqual('application/json', res.headers['Content-Type'])
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key5')
+ res_dict = self.controller.show(req, 1, 'key5')
+
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key6')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(404, res.status_int)
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key6')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key6')
def test_delete(self):
self.stubs.Set(nova.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key5')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key5')
+ self.controller.delete(req, 1, 'key5')
def test_create(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path)
- req.method = 'POST'
- req.body = '{"extra_specs": {"key1": "value1"}}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
+ body = {"extra_specs": {"key1": "value1"}}
+
+ req = fakes.HTTPRequest.blank(self.api_path)
+ res_dict = self.controller.create(req, 1, body)
+
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_create_empty_body(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path)
- req.method = 'POST'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+
+ req = fakes.HTTPRequest.blank(self.api_path)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, 1, '')
def test_update_item(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key1')
- req.method = 'PUT'
- req.body = '{"key1": "value1"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- self.assertEqual('application/json', res.headers['Content-Type'])
- res_dict = json.loads(res.body)
+ body = {"key1": "value1"}
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key1')
+ res_dict = self.controller.update(req, 1, 'key1', body)
+
self.assertEqual('value1', res_dict['key1'])
def test_update_item_empty_body(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key1')
- req.method = 'PUT'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', '')
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/key1')
- req.method = 'PUT'
- req.body = '{"key1": "value1", "key2": "value2"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+ body = {"key1": "value1", "key2": "value2"}
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/key1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
- req = webob.Request.blank(self.api_path + '/bad')
- req.method = 'PUT'
- req.body = '{"key1": "value1"}'
- req.headers["content-type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, res.status_int)
+ body = {"key1": "value1"}
+
+ req = fakes.HTTPRequest.blank(self.api_path + '/bad')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'bad', body)
diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py
index 0a3023e48..a130d1140 100644
--- a/nova/tests/api/openstack/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/contrib/test_volumes.py
@@ -81,9 +81,6 @@ class BootFromVolumeTest(test.TestCase):
self.assertEqual(res.status_int, 200)
server = json.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(2, int(server['flavor']['id']))
- self.assertEqual(u'test_server', server['name'])
- self.assertEqual(IMAGE_UUID, server['image']['id'])
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(len(_block_device_mapping_seen), 1)
self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
diff --git a/nova/tests/api/openstack/test_consoles.py b/nova/tests/api/openstack/test_consoles.py
index 679419916..0b682be0a 100644
--- a/nova/tests/api/openstack/test_consoles.py
+++ b/nova/tests/api/openstack/test_consoles.py
@@ -126,9 +126,9 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
return instance
-class ConsolesTest(test.TestCase):
+class ConsolesControllerTest(test.TestCase):
def setUp(self):
- super(ConsolesTest, self).setUp()
+ super(ConsolesControllerTest, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
@@ -137,17 +137,16 @@ class ConsolesTest(test.TestCase):
self.instance_db.return_server_by_uuid)
self.uuid = str(utils.gen_uuid())
self.url = '/v1.1/fake/servers/%s/consoles' % self.uuid
+ self.controller = consoles.Controller()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
- self.assertTrue(instance_id, '10')
+ self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.API, 'create_console', fake_create_console)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller.create(req, self.uuid)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
@@ -166,45 +165,19 @@ class ConsolesTest(test.TestCase):
self.stubs.Set(console.API, 'get_console', fake_get_console)
- req = webob.Request.blank(self.url + '/20')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ res_dict = self.controller.show(req, self.uuid, '20')
self.assertDictMatch(res_dict, expected)
- def test_show_console_xml(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
- pool = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- return dict(id=console_id, password='fake_password',
- port='fake_port', pool=pool)
-
- self.stubs.Set(console.API, 'get_console', fake_get_console)
-
- req = webob.Request.blank(self.url + '/20.xml')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
-
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res_tree.tag, 'console')
- self.assertEqual(res_tree.xpath('id')[0].text, '20')
- self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
- self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
- self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
- self.assertEqual(res_tree.xpath('console_type')[0].text,
- 'fake_type')
-
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.API, 'get_console', fake_get_console)
- req = webob.Request.blank(self.url + '/20')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 404)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
@@ -212,9 +185,9 @@ class ConsolesTest(test.TestCase):
self.stubs.Set(console.API, 'get_console', fake_get_console)
- req = webob.Request.blank(self.url + '/20')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 404)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
@@ -236,52 +209,10 @@ class ConsolesTest(test.TestCase):
self.stubs.Set(console.API, 'get_consoles', fake_get_consoles)
- req = webob.Request.blank(self.url)
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req, self.uuid)
self.assertDictMatch(res_dict, expected)
- def test_list_consoles_xml(self):
- def fake_get_consoles(cons_self, context, instance_id):
- self.assertEqual(instance_id, self.uuid)
-
- pool1 = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- cons1 = dict(id=10, password='fake_password',
- port='fake_port', pool=pool1)
- pool2 = dict(console_type='fake_type2',
- public_hostname='fake_hostname2')
- cons2 = dict(id=11, password='fake_password2',
- port='fake_port2', pool=pool2)
- return [cons1, cons2]
-
- expected = {'consoles':
- [{'console': {'id': 10, 'console_type': 'fake_type'}},
- {'console': {'id': 11, 'console_type': 'fake_type2'}}]}
-
- self.stubs.Set(console.API, 'get_consoles', fake_get_consoles)
-
- req = webob.Request.blank(self.url + '.xml')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
-
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res_tree.tag, 'consoles')
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, 'console')
- self.assertEqual(res_tree[1].tag, 'console')
- self.assertEqual(len(res_tree[0]), 1)
- self.assertEqual(res_tree[0][0].tag, 'console')
- self.assertEqual(len(res_tree[1]), 1)
- self.assertEqual(res_tree[1][0].tag, 'console')
- self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
- self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
- self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
- 'fake_type')
- self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
- 'fake_type2')
-
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
@@ -298,29 +229,71 @@ class ConsolesTest(test.TestCase):
self.stubs.Set(console.API, 'get_console', fake_get_console)
self.stubs.Set(console.API, 'delete_console', fake_delete_console)
- req = webob.Request.blank(self.url + '/20')
- req.method = "DELETE"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 202)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.controller.delete(req, self.uuid, '20')
- def test_show_console_unknown_console(self):
+ def test_delete_console_unknown_console(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.API, 'delete_console', fake_delete_console)
- req = webob.Request.blank(self.url + '/20')
- req.method = "DELETE"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 404)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
- def test_show_console_unknown_instance(self):
+ def test_delete_console_unknown_instance(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.API, 'delete_console', fake_delete_console)
- req = webob.Request.blank(self.url + '/20')
- req.method = "DELETE"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 404)
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+
+class TestConsolesXMLSerializer(test.TestCase):
+
+ serializer = consoles.ConsoleXMLSerializer()
+
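+    # These checks replace the removed test_show_console_xml and
+    # test_list_consoles_xml round-trips; only the serializer output
+    # is exercised here.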
+ def test_show(self):
+ fixture = {'console': {'id': 20,
+ 'password': 'fake_password',
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'console_type': 'fake_type'}}
+
+ output = self.serializer.serialize(fixture, 'show')
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'console')
+ self.assertEqual(res_tree.xpath('id')[0].text, '20')
+ self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
+ self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
+ self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
+ self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
+
+ def test_index(self):
+ fixture = {'consoles': [{'console': {'id': 10,
+ 'console_type': 'fake_type'}},
+ {'console': {'id': 11,
+ 'console_type': 'fake_type2'}}]}
+
+ output = self.serializer.serialize(fixture, 'index')
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'consoles')
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, 'console')
+ self.assertEqual(res_tree[1].tag, 'console')
+ self.assertEqual(len(res_tree[0]), 1)
+ self.assertEqual(res_tree[0][0].tag, 'console')
+ self.assertEqual(len(res_tree[1]), 1)
+ self.assertEqual(res_tree[1][0].tag, 'console')
+ self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
+ self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
+ self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
+ 'fake_type')
+ self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
+ 'fake_type2')
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index cb44288c7..b4fe3e730 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -102,6 +102,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"Createserverext",
"DeferredDelete",
"DiskConfig",
+ "ExtendedStatus",
"FlavorExtraSpecs",
"FlavorExtraData",
"Floating_ips",
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 25bea252b..9d50483b8 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -30,6 +30,10 @@ def return_server_by_uuid(context, uuid):
return stub_instance(1, uuid=uuid)
+def return_server_by_uuid_not_found(context, uuid):
+ raise exception.NotFound()
+
+
def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
@@ -118,8 +122,8 @@ class MockSetAdminPassword(object):
self.instance_id = None
self.password = None
- def __call__(self, context, instance_id, password):
- self.instance_id = instance_id
+ def __call__(self, context, instance, password):
+ self.instance_id = instance['uuid']
self.password = password
@@ -205,28 +209,37 @@ class ServerActionsControllerTest(test.TestCase):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.action, req, FAKE_UUID, body)
- def test_server_reboot_hard(self):
+ def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.controller.action(req, FAKE_UUID, body)
- def test_server_reboot_soft(self):
+ def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.controller.action(req, FAKE_UUID, body)
- def test_server_reboot_incorrect_type(self):
+ def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.action, req, FAKE_UUID, body)
- def test_server_reboot_missing_type(self):
+ def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.action, req, FAKE_UUID, body)
+ def test_reboot_not_found(self):
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid_not_found)
+
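+        # A NotFound raised by the db lookup should surface as a 404.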
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action,
+ req, str(utils.gen_uuid()), body)
+
def test_server_rebuild_accepted_minimum(self):
new_return_server = return_server_with_attributes(image_ref='2')
self.stubs.Set(nova.db, 'instance_get', new_return_server)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index e6e528535..aa9c5c02c 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1306,6 +1306,11 @@ class ServerStatusTest(test.TestCase):
task_states.REBOOTING)
self.assertEqual(response['server']['status'], 'REBOOT')
+ def test_reboot_hard(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING_HARD)
+ self.assertEqual(response['server']['status'], 'HARD_REBOOT')
+
def test_rebuild(self):
response = self._get_with_state(vm_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
@@ -1354,7 +1359,7 @@ class ServersControllerCreateTest(test.TestCase):
self.instance_cache_num += 1
instance = {
'id': self.instance_cache_num,
- 'display_name': 'server_test',
+ 'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
@@ -1390,8 +1395,10 @@ class ServersControllerCreateTest(test.TestCase):
request_spec['instance_properties']))
return instances
- def server_update(context, id, params):
- return instance_create(context, id)
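+        # Update the cached instance in place so later reads see what the
+        # controller changed (e.g. display_name).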
+ def server_update(context, instance_id, params):
+ inst = self.instance_cache[instance_id]
+ inst.update(params)
+ return inst
def fake_method(*args, **kwargs):
pass
@@ -1402,10 +1409,6 @@ class ServersControllerCreateTest(test.TestCase):
def queue_get_for(context, *args):
return 'network_topic'
- def kernel_ramdisk_mapping(*args, **kwargs):
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- return (image_uuid, image_uuid)
-
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
@@ -1424,8 +1427,6 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(nova.db, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
- self.stubs.Set(servers.Controller, "_get_kernel_ramdisk_from_image",
- kernel_ramdisk_mapping)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
def _test_create_instance(self):
@@ -1441,10 +1442,7 @@ class ServersControllerCreateTest(test.TestCase):
server = self.controller.create(req, body)['server']
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
- self.assertEqual('server_test', server['name'])
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual('2', server['flavor']['id'])
- self.assertEqual(image_uuid, server['image']['id'])
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
@@ -1613,12 +1611,6 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(0, server['progress'])
- self.assertEqual('server_test', server['name'])
- self.assertEqual(expected_flavor, server['flavor'])
- self.assertEqual(expected_image, server['image'])
- self.assertEqual(access_ipv4, server['accessIPv4'])
- self.assertEqual(access_ipv6, server['accessIPv6'])
def test_create_instance(self):
# proper local hrefs must start with 'http://localhost/v1.1/'
@@ -1670,13 +1662,6 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual("BUILD", server["status"])
- self.assertEqual(0, server['progress'])
- self.assertEqual('server_test', server['name'])
- self.assertEqual(expected_flavor, server['flavor'])
- self.assertEqual(expected_image, server['image'])
- self.assertEqual('1.2.3.4', server['accessIPv4'])
- self.assertEqual('fead::1234', server['accessIPv6'])
def test_create_instance_invalid_key_name(self):
image_href = 'http://localhost/v1.1/images/2'
@@ -1777,7 +1762,6 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertTrue(server['config_drive'])
def test_create_instance_with_config_drive_as_id(self):
self.config_drive = 2
@@ -1805,8 +1789,6 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertTrue(server['config_drive'])
- self.assertEqual(2, server['config_drive'])
def test_create_instance_with_bad_config_drive(self):
self.config_drive = "asdf"
@@ -1859,7 +1841,6 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertFalse(server['config_drive'])
def test_create_instance_bad_href(self):
image_href = 'asdf'
@@ -1879,24 +1860,6 @@ class ServersControllerCreateTest(test.TestCase):
def test_create_instance_local_href(self):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/v1.1/flavors/3'
- expected_flavor = {
- "id": "3",
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/fake/flavors/3',
- },
- ],
- }
- expected_image = {
- "id": image_uuid,
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/fake/images/%s' % image_uuid,
- },
- ],
- }
body = {
'server': {
'name': 'server_test',
@@ -1912,8 +1875,7 @@ class ServersControllerCreateTest(test.TestCase):
res = self.controller.create(req, body)
server = res['server']
- self.assertEqual(expected_flavor, server['flavor'])
- self.assertEqual(expected_image, server['image'])
+ self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_pass(self):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
@@ -2417,62 +2379,6 @@ class TestAddressesXMLSerialization(test.TestCase):
str(ip['addr']))
-class TestGetKernelRamdiskFromImage(test.TestCase):
- """
- If we're building from an AMI-style image, we need to be able to fetch the
- kernel and ramdisk associated with the machine image. This information is
- stored with the image metadata and returned via the ImageService.
-
- These tests ensure that we parse the metadata returned by the ImageService
- correctly and that we handle failure modes appropriately.
- """
-
- def test_status_not_active(self):
- """We should only allow fetching of kernel and ramdisk information if
- we have a 'fully-formed' image, aka 'active'
- """
- image_meta = {'id': 1, 'status': 'queued'}
- self.assertRaises(exception.Invalid, self._get_k_r, image_meta)
-
- def test_not_ami(self):
- """Anything other than ami should return no kernel and no ramdisk"""
- image_meta = {'id': 1, 'status': 'active', 'container_format': 'vhd'}
- kernel_id, ramdisk_id = self._get_k_r(image_meta)
- self.assertEqual(kernel_id, None)
- self.assertEqual(ramdisk_id, None)
-
- def test_ami_no_kernel(self):
- """If an ami is missing a kernel it should raise NotFound"""
- image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
- 'properties': {'ramdisk_id': 1}}
- self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
-
- def test_ami_no_ramdisk(self):
- """If an ami is missing a ramdisk, return kernel ID and None for
- ramdisk ID
- """
- image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
- 'properties': {'kernel_id': 1}}
- kernel_id, ramdisk_id = self._get_k_r(image_meta)
- self.assertEqual(kernel_id, 1)
- self.assertEqual(ramdisk_id, None)
-
- def test_ami_kernel_ramdisk_present(self):
- """Return IDs if both kernel and ramdisk are present"""
- image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}}
- kernel_id, ramdisk_id = self._get_k_r(image_meta)
- self.assertEqual(kernel_id, 1)
- self.assertEqual(ramdisk_id, 2)
-
- @staticmethod
- def _get_k_r(image_meta):
- """Rebinding function to a shorter name for convenience"""
- kernel_id, ramdisk_id = servers.Controller.\
- _do_get_kernel_ramdisk_from_image(image_meta)
- return kernel_id, ramdisk_id
-
-
class ServersViewBuilderTest(test.TestCase):
def setUp(self):
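
The test_servers.py changes above follow a pattern this commit applies to several API test modules: tests invoke controller methods directly on a fake request instead of pushing a webob request through the full WSGI app and decoding the JSON body. A minimal self-contained sketch of that style; FakeController here is an illustrative stand-in, not Nova's controller (Nova's tests use nova.tests.api.openstack.fakes.HTTPRequest.blank() to build the request):

    import unittest

    class FakeController(object):
        def index(self, req):
            # Controllers return plain dicts; serialization to JSON/XML
            # happens in the WSGI layer, which is no longer under test.
            return {'servers': [{'id': 'fake-uuid'}]}

    class DirectCallTest(unittest.TestCase):
        def test_index(self):
            res_dict = FakeController().index(req=None)
            self.assertEqual(len(res_dict['servers']), 1)

    if __name__ == '__main__':
        unittest.main()
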
diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py
index cc77d7d26..82fce68ff 100644
--- a/nova/tests/api/openstack/test_users.py
+++ b/nova/tests/api/openstack/test_users.py
@@ -56,72 +56,29 @@ class UsersTest(test.TestCase):
fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False))
fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True))
+ self.controller = users.Controller()
+
def test_get_user_list(self):
- req = webob.Request.blank('/v1.1/fake/users')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/users')
+ res_dict = self.controller.index(req)
- self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['users']), 2)
- def test_get_user_list_xml(self):
- req = webob.Request.blank('/v1.1/fake/users.xml')
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res_tree.tag, 'users')
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, 'user')
- self.assertEqual(res_tree[0].get('id'), 'id1')
- self.assertEqual(res_tree[1].tag, 'user')
- self.assertEqual(res_tree[1].get('id'), 'id2')
-
def test_get_user_by_id(self):
- req = webob.Request.blank('/v1.1/fake/users/id2')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/users/id2')
+ res_dict = self.controller.show(req, 'id2')
self.assertEqual(res_dict['user']['id'], 'id2')
self.assertEqual(res_dict['user']['name'], 'guy2')
self.assertEqual(res_dict['user']['secret'], 'secret2')
self.assertEqual(res_dict['user']['admin'], True)
- self.assertEqual(res.status_int, 200)
-
- def test_get_user_by_id_xml(self):
- req = webob.Request.blank('/v1.1/fake/users/id2.xml')
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res_tree.tag, 'user')
- self.assertEqual(res_tree.get('id'), 'id2')
- self.assertEqual(res_tree.get('name'), 'guy2')
- self.assertEqual(res_tree.get('secret'), 'secret2')
- self.assertEqual(res_tree.get('admin'), 'True')
def test_user_delete(self):
- # Check the user exists
- req = webob.Request.blank('/v1.1/fake/users/id1')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- self.assertEqual(res_dict['user']['id'], 'id1')
- self.assertEqual(res.status_int, 200)
-
- # Delete the user
- req = webob.Request.blank('/v1.1/fake/users/id1')
- req.method = 'DELETE'
- res = req.get_response(fakes.wsgi_app())
+ req = fakes.HTTPRequest.blank('/v1.1/fake/users/id1')
+ res_dict = self.controller.delete(req, 'id1')
+
self.assertTrue('id1' not in [u.id for u in
fakes.FakeAuthManager.auth_data])
- self.assertEqual(res.status_int, 200)
-
- # Check the user is not returned (and returns 404)
- req = webob.Request.blank('/v1.1/fake/users/id1')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(res.status_int, 404)
def test_user_create(self):
secret = utils.generate_password()
@@ -129,15 +86,8 @@ class UsersTest(test.TestCase):
access='acc3',
secret=secret,
admin=True))
- req = webob.Request.blank('/v1.1/fake/users')
- req.headers["Content-Type"] = "application/json"
- req.method = 'POST'
- req.body = json.dumps(body)
-
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- self.assertEqual(res.status_int, 200)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/users')
+ res_dict = self.controller.create(req, body)
# NOTE(justinsb): This is a questionable assertion in general
# fake sets id=name, but others might not...
@@ -151,72 +101,57 @@ class UsersTest(test.TestCase):
fakes.FakeAuthManager.auth_data])
self.assertEqual(len(fakes.FakeAuthManager.auth_data), 3)
- def test_user_create_xml(self):
- secret = utils.generate_password()
- body = dict(user=dict(name='test_guy',
- access='acc3',
- secret=secret,
- admin=True))
- req = webob.Request.blank('/v1.1/fake/users.xml')
- req.headers["Content-Type"] = "application/json"
- req.method = 'POST'
- req.body = json.dumps(body)
-
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
-
- self.assertEqual(res.status_int, 200)
-
- # NOTE(justinsb): This is a questionable assertion in general
- # fake sets id=name, but others might not...
- self.assertEqual(res_tree.tag, 'user')
- self.assertEqual(res_tree.get('id'), 'test_guy')
-
- self.assertEqual(res_tree.get('name'), 'test_guy')
- self.assertEqual(res_tree.get('access'), 'acc3')
- self.assertEqual(res_tree.get('secret'), secret)
- self.assertEqual(res_tree.get('admin'), 'True')
- self.assertTrue('test_guy' in [u.id for u in
- fakes.FakeAuthManager.auth_data])
- self.assertEqual(len(fakes.FakeAuthManager.auth_data), 3)
-
def test_user_update(self):
new_secret = utils.generate_password()
body = dict(user=dict(name='guy2',
access='acc2',
secret=new_secret))
- req = webob.Request.blank('/v1.1/fake/users/id2')
- req.headers["Content-Type"] = "application/json"
- req.method = 'PUT'
- req.body = json.dumps(body)
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/users/id2')
+ res_dict = self.controller.update(req, 'id2', body)
- self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['user']['id'], 'id2')
self.assertEqual(res_dict['user']['name'], 'guy2')
self.assertEqual(res_dict['user']['access'], 'acc2')
self.assertEqual(res_dict['user']['secret'], new_secret)
self.assertEqual(res_dict['user']['admin'], True)
- def test_user_update_xml(self):
- new_secret = utils.generate_password()
- body = dict(user=dict(name='guy2',
- access='acc2',
- secret=new_secret))
- req = webob.Request.blank('/v1.1/fake/users/id2.xml')
- req.headers["Content-Type"] = "application/json"
- req.method = 'PUT'
- req.body = json.dumps(body)
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
+class TestUsersXMLSerializer(test.TestCase):
+
+ serializer = users.UserXMLSerializer()
+
+ def test_index(self):
+ fixture = {'users': [{'id': 'id1',
+ 'name': 'guy1',
+ 'secret': 'secret1',
+ 'admin': False},
+ {'id': 'id2',
+ 'name': 'guy2',
+ 'secret': 'secret2',
+ 'admin': True}]}
+
+ output = self.serializer.serialize(fixture, 'index')
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'users')
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, 'user')
+ self.assertEqual(res_tree[0].get('id'), 'id1')
+ self.assertEqual(res_tree[1].tag, 'user')
+ self.assertEqual(res_tree[1].get('id'), 'id2')
+
+ def test_show(self):
+ fixture = {'user': {'id': 'id2',
+ 'name': 'guy2',
+ 'secret': 'secret2',
+ 'admin': True}}
+
+ output = self.serializer.serialize(fixture, 'show')
+ res_tree = etree.XML(output)
- self.assertEqual(res.status_int, 200)
self.assertEqual(res_tree.tag, 'user')
self.assertEqual(res_tree.get('id'), 'id2')
self.assertEqual(res_tree.get('name'), 'guy2')
- self.assertEqual(res_tree.get('access'), 'acc2')
- self.assertEqual(res_tree.get('secret'), new_secret)
+ self.assertEqual(res_tree.get('secret'), 'secret2')
self.assertEqual(res_tree.get('admin'), 'True')
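
With the HTTP-level XML tests removed, the new TestUsersXMLSerializer exercises serialization in isolation: serialize a fixture dict, parse the output, and assert on tags and attributes. The essence of that check, sketched against a hand-written XML string (lxml is already a dependency of these tests; the attribute names follow the fixture above):

    from lxml import etree

    xml = '<user id="id2" name="guy2" secret="secret2" admin="True"/>'
    tree = etree.XML(xml)
    assert tree.tag == 'user'
    assert tree.get('id') == 'id2'
    # XML attribute values are strings, hence 'True' rather than True.
    assert tree.get('admin') == 'True'
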
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index af762d3d6..496c3e8e6 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -36,7 +36,8 @@ FLAGS = flags.FLAGS
def zone_get(context, zone_id):
return dict(id=1, api_url='http://example.com', username='bob',
- password='xxx', weight_scale=1.0, weight_offset=0.0)
+ password='xxx', weight_scale=1.0, weight_offset=0.0,
+ name='darksecret')
def zone_create(context, values):
@@ -106,198 +107,81 @@ class ZonesTest(test.TestCase):
self.stubs.Set(nova.db, 'zone_create', zone_create)
self.stubs.Set(nova.db, 'zone_delete', zone_delete)
+ self.controller = zones.Controller()
+
def test_get_zone_list_scheduler(self):
self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
- req = webob.Request.blank('/v1.1/fake/zones')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(len(res_dict['zones']), 2)
- def test_get_zone_list_scheduler_xml(self):
- self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
- req = webob.Request.blank('/v1.1/fake/zones.xml')
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones')
+ res_dict = self.controller.index(req)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res_tree.tag, '{%s}zones' % xmlutil.XMLNS_V10)
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree[1].tag, '{%s}zone' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_dict['zones']), 2)
def test_get_zone_list_db(self):
self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
- req = webob.Request.blank('/v1.1/fake/zones')
- req.headers["Content-Type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(len(res_dict['zones']), 2)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones')
+ res_dict = self.controller.index(req)
- def test_get_zone_list_db_xml(self):
- self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
- self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
- req = webob.Request.blank('/v1.1/fake/zones.xml')
- req.headers["Content-Type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
-
- self.assertEqual(res.status_int, 200)
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res_tree.tag, '{%s}zones' % xmlutil.XMLNS_V10)
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree[1].tag, '{%s}zone' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_dict['zones']), 2)
def test_get_zone_by_id(self):
- req = webob.Request.blank('/v1.1/fake/zones/1')
- req.headers["Content-Type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones/1')
+ res_dict = self.controller.show(req, 1)
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('password' in res_dict['zone'])
- def test_get_zone_by_id_xml(self):
- req = webob.Request.blank('/v1.1/fake/zones/1.xml')
- req.headers["Content-Type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('id'), '1')
- self.assertEqual(res_tree.get('api_url'), 'http://example.com')
- self.assertEqual(res_tree.get('password'), None)
-
def test_zone_delete(self):
- req = webob.Request.blank('/v1.1/fake/zones/1')
- req.headers["Content-Type"] = "application/json"
- res = req.get_response(fakes.wsgi_app())
-
- self.assertEqual(res.status_int, 200)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones/1')
+ self.controller.delete(req, 1)
def test_zone_create(self):
body = dict(zone=dict(api_url='http://example.com', username='fred',
password='fubar'))
- req = webob.Request.blank('/v1.1/fake/zones')
- req.headers["Content-Type"] = "application/json"
- req.method = 'POST'
- req.body = json.dumps(body)
- res = req.get_response(fakes.wsgi_app())
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones')
+ res_dict = self.controller.create(req, body)
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
- def test_zone_create_xml(self):
- body = dict(zone=dict(api_url='http://example.com', username='fred',
- password='fubar'))
- req = webob.Request.blank('/v1.1/fake/zones.xml')
- req.headers["Content-Type"] = "application/json"
- req.method = 'POST'
- req.body = json.dumps(body)
-
- res = req.get_response(fakes.wsgi_app())
-
- self.assertEqual(res.status_int, 200)
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('id'), '1')
- self.assertEqual(res_tree.get('api_url'), 'http://example.com')
- self.assertEqual(res_tree.get('username'), None)
-
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
- req = webob.Request.blank('/v1.1/fake/zones/1')
- req.headers["Content-Type"] = "application/json"
- req.method = 'PUT'
- req.body = json.dumps(body)
- res = req.get_response(fakes.wsgi_app())
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones/1')
+ res_dict = self.controller.update(req, 1, body)
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
self.assertFalse('username' in res_dict['zone'])
- def test_zone_update_xml(self):
- body = dict(zone=dict(username='zeb', password='sneaky'))
- req = webob.Request.blank('/v1.1/fake/zones/1.xml')
- req.headers["Content-Type"] = "application/json"
- req.method = 'PUT'
- req.body = json.dumps(body)
-
- res = req.get_response(fakes.wsgi_app())
-
- self.assertEqual(res.status_int, 200)
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('id'), '1')
- self.assertEqual(res_tree.get('api_url'), 'http://example.com')
- self.assertEqual(res_tree.get('username'), None)
-
def test_zone_info(self):
caps = ['cap1=a;b', 'cap2=c;d']
self.flags(zone_name='darksecret', zone_capabilities=caps)
self.stubs.Set(api, '_call_scheduler', zone_capabilities)
- body = dict(zone=dict(username='zeb', password='sneaky'))
- req = webob.Request.blank('/v1.1/fake/zones/info')
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones/info')
+ res_dict = self.controller.info(req)
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['zone']['name'], 'darksecret')
self.assertEqual(res_dict['zone']['cap1'], 'a;b')
self.assertEqual(res_dict['zone']['cap2'], 'c;d')
- def test_zone_info_xml(self):
- caps = ['cap1=a;b', 'cap2=c;d']
- self.flags(zone_name='darksecret', zone_capabilities=caps)
- self.stubs.Set(api, '_call_scheduler', zone_capabilities)
-
- body = dict(zone=dict(username='zeb', password='sneaky'))
- req = webob.Request.blank('/v1.1/fake/zones/info.xml')
-
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('name'), 'darksecret')
- for elem in res_tree:
- self.assertEqual(elem.tag in ('{%s}cap1' % xmlutil.XMLNS_V10,
- '{%s}cap2' % xmlutil.XMLNS_V10),
- True)
- if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'a;b')
- elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'c;d')
-
def test_zone_select(self):
key = 'c286696d887c9aa0611bbb3e2025a45a'
self.flags(build_plan_encryption_key=key)
self.stubs.Set(api, 'select', zone_select)
- req = webob.Request.blank('/v1.1/fake/zones/select')
- req.method = 'POST'
- req.headers["Content-Type"] = "application/json"
# Select queries end up being JSON encoded twice.
# Once to a string and again as an HTTP POST Body
- req.body = json.dumps(json.dumps({}))
+ body = json.dumps({})
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
- self.assertEqual(res.status_int, 200)
+ req = fakes.HTTPRequest.blank('/v1.1/fake/zones/select')
+ res_dict = self.controller.select(req, body)
self.assertTrue('weights' in res_dict)
@@ -317,21 +201,23 @@ class ZonesTest(test.TestCase):
self.assertEqual(len(item), 2)
self.assertTrue('weight' in item)
- def test_zone_select_xml(self):
+
+class TestZonesXMLSerializer(test.TestCase):
+
+ serializer = zones.ZonesXMLSerializer()
+
+ def test_select(self):
key = 'c286696d887c9aa0611bbb3e2025a45a'
- self.flags(build_plan_encryption_key=key)
- self.stubs.Set(api, 'select', zone_select)
- req = webob.Request.blank('/v1.1/fake/zones/select.xml')
- req.method = 'POST'
- req.headers["Content-Type"] = "application/json"
- # Select queries end up being JSON encoded twice.
- # Once to a string and again as an HTTP POST Body
- req.body = json.dumps(json.dumps({}))
+ encrypt = crypto.encryptor(key)
+ decrypt = crypto.decryptor(key)
+
+ item = GLOBAL_BUILD_PLAN[0]
+ fixture = {'weights': {'blob': encrypt(json.dumps(item)),
+ 'weight': item['weight']}}
- res = req.get_response(fakes.wsgi_app())
- res_tree = etree.fromstring(res.body)
- self.assertEqual(res.status_int, 200)
+ output = self.serializer.serialize(fixture, 'select')
+ res_tree = etree.XML(output)
self.assertEqual(res_tree.tag, '{%s}weights' % xmlutil.XMLNS_V10)
@@ -345,7 +231,6 @@ class ZonesTest(test.TestCase):
elif chld.tag.endswith('weight'):
weight = chld.text
- decrypt = crypto.decryptor(FLAGS.build_plan_encryption_key)
secret_item = json.loads(decrypt(blob))
found = False
for original_item in GLOBAL_BUILD_PLAN:
@@ -358,3 +243,41 @@ class ZonesTest(test.TestCase):
self.assertTrue(found)
self.assertEqual(len(item), 2)
self.assertTrue(weight)
+
+ def test_index(self):
+ fixture = {'zones': zone_get_all_scheduler()}
+
+ output = self.serializer.serialize(fixture, 'index')
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}zones' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}zone' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}zone' % xmlutil.XMLNS_V10)
+
+ def test_show(self):
+ zone = {'id': 1,
+ 'api_url': 'http://example.com',
+ 'name': 'darksecret',
+ 'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ fixture = {'zone': zone}
+
+ output = self.serializer.serialize(fixture, 'show')
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('id'), '1')
+ self.assertEqual(res_tree.get('api_url'), 'http://example.com')
+ self.assertEqual(res_tree.get('password'), None)
+
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ for elem in res_tree:
+ self.assertEqual(elem.tag in ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10),
+ True)
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
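
TestZonesXMLSerializer.test_select builds its fixture by encrypting a build-plan entry with the same key the decryption side uses, so the test no longer depends on FLAGS.build_plan_encryption_key being set by an earlier request. The round-trip it relies on, sketched (this assumes the nova tree is importable; encryptor/decryptor are the same calls the test above makes):

    import json
    from nova import crypto

    key = 'c286696d887c9aa0611bbb3e2025a45a'
    encrypt = crypto.encryptor(key)
    decrypt = crypto.decryptor(key)
    item = {'weight': 2, 'blob': 'payload'}
    # Encrypt-then-decrypt must reproduce the original JSON payload.
    assert json.loads(decrypt(encrypt(json.dumps(item)))) == item
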
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index 1567393e3..5c0f4f05e 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import StringIO
from nova import exception
@@ -74,11 +75,15 @@ class FakeGlance(object):
pass
def get_image_meta(self, image_id):
- return self.IMAGE_FIXTURES[int(image_id)]['image_meta']
+ meta = copy.deepcopy(self.IMAGE_FIXTURES[int(image_id)]['image_meta'])
+ meta['id'] = image_id
+ return meta
def get_image(self, image_id):
image = self.IMAGE_FIXTURES[int(image_id)]
- return image['image_meta'], image['image_data']
+ meta = copy.deepcopy(image['image_meta'])
+ meta['id'] = image_id
+ return meta, image['image_data']
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
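
The copy.deepcopy added to FakeGlance keeps callers from mutating the shared IMAGE_FIXTURES dict when they annotate the returned metadata (here, by stamping an 'id' onto it). A stand-alone illustration of the hazard, with made-up fixture data:

    import copy

    FIXTURES = {1: {'image_meta': {'name': 'fakeimage'}}}

    def get_image_meta_unsafe(image_id):
        return FIXTURES[int(image_id)]['image_meta']   # shared reference

    def get_image_meta_safe(image_id):
        meta = copy.deepcopy(FIXTURES[int(image_id)]['image_meta'])
        meta['id'] = image_id                          # safe to annotate
        return meta

    get_image_meta_unsafe(1)['id'] = 1    # pollutes the fixture itself
    assert 'id' in FIXTURES[1]['image_meta']
    safe = get_image_meta_safe(1)
    safe['name'] = 'changed'              # fixture stays intact
    assert FIXTURES[1]['image_meta']['name'] == 'fakeimage'
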
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 36f62ac01..58aca5778 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -292,10 +292,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- # Reenable when bug fixed
- self.assertEqual(metadata, created_server.get('metadata'))
- # Check it's there
-
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
diff --git a/nova/tests/scheduler/test_distributed_scheduler.py b/nova/tests/scheduler/test_distributed_scheduler.py
index a66fae343..ad2d1b1b6 100644
--- a/nova/tests/scheduler/test_distributed_scheduler.py
+++ b/nova/tests/scheduler/test_distributed_scheduler.py
@@ -25,7 +25,6 @@ from nova import exception
from nova import rpc
from nova import test
from nova.compute import api as compute_api
-from nova.scheduler import driver
from nova.scheduler import distributed_scheduler
from nova.scheduler import least_cost
from nova.scheduler import zone_manager
@@ -120,7 +119,7 @@ class DistributedSchedulerTestCase(test.TestCase):
fake_context = context.RequestContext('user', 'project')
request_spec = dict(instance_type=dict(memory_mb=1, local_gb=1))
- self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+ self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
def test_run_instance_with_blob_hint(self):
@@ -189,7 +188,7 @@ class DistributedSchedulerTestCase(test.TestCase):
fake_context = context.RequestContext('user', 'project')
- self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+ self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, {})
self.assertTrue(self.was_admin)
@@ -212,10 +211,11 @@ class DistributedSchedulerTestCase(test.TestCase):
self.next_weight = 1.0
- def _fake_filter_hosts(topic, request_info, unfiltered_hosts):
+ def _fake_filter_hosts(topic, request_info, unfiltered_hosts,
+ options):
return unfiltered_hosts
- def _fake_weigh_hosts(request_info, hosts):
+ def _fake_weighted_sum(functions, hosts, options):
self.next_weight += 2.0
host, hostinfo = hosts[0]
return least_cost.WeightedHost(self.next_weight, host=host,
@@ -225,7 +225,7 @@ class DistributedSchedulerTestCase(test.TestCase):
fake_context = context.RequestContext('user', 'project')
sched.zone_manager = ds_fakes.FakeZoneManager()
self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts)
- self.stubs.Set(least_cost, 'weigh_hosts', _fake_weigh_hosts)
+ self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
@@ -260,3 +260,12 @@ class DistributedSchedulerTestCase(test.TestCase):
self.assertTrue(isinstance(weighted_host, least_cost.WeightedHost))
self.assertEqual(weighted_host.to_dict(), dict(weight=1, host='x',
blob='y', zone='z'))
+
+ def test_get_cost_functions(self):
+ fixture = ds_fakes.FakeDistributedScheduler()
+ fns = fixture.get_cost_functions()
+ self.assertEquals(len(fns), 1)
+ weight, fn = fns[0]
+ self.assertEquals(weight, 1.0)
+ hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
+ self.assertEquals(1000, fn(hostinfo))
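
The distributed-scheduler stubs now take a trailing options argument: both host filters and cost functions receive a dict of scheduler options, threaded in from the configuration machinery tested later in this diff (test_scheduler_options.py). A sketch of the new callable shapes, with illustrative names and a minimal HostInfo stand-in:

    class FakeHostInfo(object):
        def __init__(self, free_ram_mb):
            self.free_ram_mb = free_ram_mb

    def fake_filter_hosts(topic, request_info, unfiltered_hosts, options):
        # A filter may consult 'options' (e.g. reservations); this one
        # is a pass-through, like the stub in the test above.
        return unfiltered_hosts

    def free_ram_cost_fn(hostinfo, options):
        return hostinfo.free_ram_mb

    assert free_ram_cost_fn(FakeHostInfo(1000), {}) == 1000
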
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 96f26b23c..62131a625 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -56,7 +56,8 @@ class HostFilterTestCase(test.TestCase):
def setUp(self):
super(HostFilterTestCase, self).setUp()
default_host_filters = ['AllHostsFilter']
- self.flags(default_host_filters=default_host_filters)
+ self.flags(default_host_filters=default_host_filters,
+ reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.instance_type = dict(name='tiny',
memory_mb=30,
vcpus=10,
@@ -121,7 +122,7 @@ class HostFilterTestCase(test.TestCase):
hf = hfs[0]
all_hosts = self._get_all_hosts()
cooked = hf.instance_type_to_filter(self.instance_type)
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(4, len(hosts))
for host, capabilities in hosts:
self.assertTrue(host.startswith('host'))
@@ -131,7 +132,7 @@ class HostFilterTestCase(test.TestCase):
# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.instance_type)
all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(3, len(hosts))
just_hosts = [host for host, hostinfo in hosts]
just_hosts.sort()
@@ -139,12 +140,26 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host3', just_hosts[1])
self.assertEquals('host2', just_hosts[0])
+ def test_instance_type_filter_reserved_memory(self):
+ self.flags(reserved_host_memory_mb=2048)
+ hf = nova.scheduler.filters.InstanceTypeFilter()
+ # filter all hosts that can support 30 ram and 300 disk after
+ # reserving 2048 ram
+ cooked = hf.instance_type_to_filter(self.instance_type)
+ all_hosts = self._get_all_hosts()
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
+ self.assertEquals(2, len(hosts))
+ just_hosts = [host for host, hostinfo in hosts]
+ just_hosts.sort()
+ self.assertEquals('host4', just_hosts[1])
+ self.assertEquals('host3', just_hosts[0])
+
def test_instance_type_filter_extra_specs(self):
hf = nova.scheduler.filters.InstanceTypeFilter()
# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.gpu_instance_type)
all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(1, len(hosts))
just_hosts = [host for host, caps in hosts]
self.assertEquals('host4', just_hosts[0])
@@ -154,7 +169,7 @@ class HostFilterTestCase(test.TestCase):
# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.instance_type)
all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(2, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
@@ -174,7 +189,7 @@ class HostFilterTestCase(test.TestCase):
]
]
cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(3, len(hosts))
just_hosts = [host for host, caps in hosts]
@@ -186,7 +201,7 @@ class HostFilterTestCase(test.TestCase):
['=', '$compute.host_memory_free', 30],
]
cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(3, len(hosts))
just_hosts = [host for host, caps in hosts]
@@ -196,7 +211,7 @@ class HostFilterTestCase(test.TestCase):
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked)
+ hosts = hf.filter_hosts(all_hosts, cooked, {})
self.assertEquals(2, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
@@ -207,32 +222,32 @@ class HostFilterTestCase(test.TestCase):
raw = ['unknown command', ]
cooked = json.dumps(raw)
try:
- hf.filter_hosts(all_hosts, cooked)
+ hf.filter_hosts(all_hosts, cooked, {})
self.fail("Should give KeyError")
except KeyError, e:
pass
- self.assertTrue(hf.filter_hosts(all_hosts, json.dumps([])))
- self.assertTrue(hf.filter_hosts(all_hosts, json.dumps({})))
+ self.assertTrue(hf.filter_hosts(all_hosts, json.dumps([]), {}))
+ self.assertTrue(hf.filter_hosts(all_hosts, json.dumps({}), {}))
self.assertTrue(hf.filter_hosts(all_hosts, json.dumps(
['not', True, False, True, False],
- )))
+ ), {}))
try:
hf.filter_hosts(all_hosts, json.dumps(
- 'not', True, False, True, False,
- ))
+ 'not', True, False, True, False,), {})
self.fail("Should give KeyError")
except KeyError, e:
pass
self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', '$foo', 100])))
+ json.dumps(['=', '$foo', 100]), {}))
self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', '$.....', 100])))
+ json.dumps(['=', '$.....', 100]), {}))
self.assertFalse(hf.filter_hosts(all_hosts,
json.dumps(
- ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
+ ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]),
+ {}))
self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', {}, ['>', '$missing....foo']])))
+ json.dumps(['=', {}, ['>', '$missing....foo']]), {}))
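
test_instance_type_filter_reserved_memory pins down the new reserved_host_memory_mb flag: a host only passes the filter if the instance still fits after the reservation is subtracted from its free memory. Roughly, with illustrative numbers and names rather than Nova's exact accounting:

    def host_passes(free_ram_mb, instance_memory_mb,
                    reserved_host_memory_mb=0):
        usable = free_ram_mb - reserved_host_memory_mb
        return usable >= instance_memory_mb

    assert host_passes(2050, 30)                                    # fits
    assert not host_passes(2050, 30, reserved_host_memory_mb=2048)  # 2 MB left
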
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index ba6cdb686..4a3af2eac 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -21,40 +21,24 @@ from nova import test
from nova.tests.scheduler import fake_zone_manager
-def offset(hostinfo):
+def offset(hostinfo, options):
return hostinfo.free_ram_mb + 10000
-def scale(hostinfo):
+def scale(hostinfo, options):
return hostinfo.free_ram_mb * 2
class LeastCostTestCase(test.TestCase):
def setUp(self):
super(LeastCostTestCase, self).setUp()
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.zone_manager = fake_zone_manager.FakeZoneManager()
def tearDown(self):
super(LeastCostTestCase, self).tearDown()
- def test_normalize_grid(self):
- raw = [
- [1, 2, 3, 4, 5],
- [10, 20, 30, 40, 50],
- [100, 200, 300, 400, 500],
- ]
- expected = [
- [.2, .4, .6, .8, 1.0],
- [.2, .4, .6, .8, 1.0],
- [.2, .4, .6, .8, 1.0],
- ]
-
- self.assertEquals(expected, least_cost.normalize_grid(raw))
-
- self.assertEquals([[]], least_cost.normalize_grid([]))
- self.assertEquals([[]], least_cost.normalize_grid([[]]))
-
def test_weighted_sum_happy_day(self):
fn_tuples = [(1.0, offset), (1.0, scale)]
hostinfo_list = self.zone_manager.get_all_host_data(None).items()
@@ -68,16 +52,14 @@ class LeastCostTestCase(test.TestCase):
# [10000, 11536, 13072, 18192]
# [0, 768, 1536, 4096]
- # normalized =
- # [ 0.55, 0.63, 0.72, 1.0]
- # [ 0.0, 0.19, 0.38, 1.0]
-
# adjusted [ 1.0 * x + 1.0 * y] =
- # [0.55, 0.82, 1.1, 2.0]
+ # [10000, 12304, 14608, 22288]
# so, host1 should win:
- weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
- self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
+ options = {}
+ weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
+ options)
+ self.assertEqual(weighted_host.weight, 10000)
self.assertEqual(weighted_host.host, 'host1')
def test_weighted_sum_single_function(self):
@@ -92,18 +74,9 @@ class LeastCostTestCase(test.TestCase):
# [offset, ]=
# [10000, 11536, 13072, 18192]
- # normalized =
- # [ 0.55, 0.63, 0.72, 1.0]
-
# so, host1 should win:
- weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
- self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
+ options = {}
+ weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
+ options)
+ self.assertEqual(weighted_host.weight, 10000)
self.assertEqual(weighted_host.host, 'host1')
-
- def test_get_cost_functions(self):
- fns = least_cost.get_cost_fns()
- self.assertEquals(len(fns), 1)
- weight, fn = fns[0]
- self.assertEquals(weight, 1.0)
- hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
- self.assertEquals(1000, fn(hostinfo))
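
The least-cost tests drop grid normalization entirely: weighted_sum(cost_fns, hosts, options) now yields the host with the smallest raw weighted total rather than a 0-1 normalized score, which is why the expected weight is 10000 instead of 0.55. A minimal sketch of that semantics (the tuple return is illustrative; Nova wraps the result in a WeightedHost):

    class H(object):
        def __init__(self, free_ram_mb):
            self.free_ram_mb = free_ram_mb

    def offset(hostinfo, options):
        return hostinfo.free_ram_mb + 10000

    def scale(hostinfo, options):
        return hostinfo.free_ram_mb * 2

    def weighted_sum(cost_fns, hosts, options):
        # cost_fns: [(weight, fn)]; hosts: [(name, hostinfo)]
        totals = [(sum(w * fn(info, options) for w, fn in cost_fns), name)
                  for name, info in hosts]
        return min(totals)       # least cost wins

    hosts = [('host1', H(0)), ('host2', H(768)), ('host4', H(4096))]
    result = weighted_sum([(1.0, offset), (1.0, scale)], hosts, {})
    assert result == (10000.0, 'host1')
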
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index df1ccce61..2c32bbd94 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -21,12 +21,10 @@ Tests For Scheduler
import datetime
import mox
-import stubout
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
-from mox import IgnoreArg
from nova import context
from nova import db
from nova import exception
@@ -35,13 +33,10 @@ from nova import service
from nova import test
from nova import rpc
from nova import utils
-from nova.db.sqlalchemy import models
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import manager
-from nova.scheduler import multi
from nova.scheduler.simple import SimpleScheduler
-from nova.scheduler.zone import ZoneScheduler
from nova.compute import power_state
from nova.compute import vm_states
@@ -84,7 +79,7 @@ def _create_volume():
"""Create a test volume"""
vol = {}
vol['size'] = 1
- vol['availability_zone'] = 'test'
+ vol['availability_zone'] = 'nova'
ctxt = context.get_admin_context()
return db.volume_create(ctxt, vol)['id']
@@ -250,77 +245,6 @@ class SchedulerTestCase(test.TestCase):
db.instance_destroy(ctxt, i_ref2['id'])
-class ZoneSchedulerTestCase(test.TestCase):
- """Test case for zone scheduler"""
- def setUp(self):
- super(ZoneSchedulerTestCase, self).setUp()
- self.flags(
- scheduler_driver='nova.scheduler.multi.MultiScheduler',
- compute_scheduler_driver='nova.scheduler.zone.ZoneScheduler',
- volume_scheduler_driver='nova.scheduler.zone.ZoneScheduler')
-
- def _create_service_model(self, **kwargs):
- service = db.sqlalchemy.models.Service()
- service.host = kwargs['host']
- service.disabled = False
- service.deleted = False
- service.report_count = 0
- service.binary = 'nova-compute'
- service.topic = 'compute'
- service.id = kwargs['id']
- service.availability_zone = kwargs['zone']
- service.created_at = utils.utcnow()
- return service
-
- def test_with_two_zones(self):
- scheduler = manager.SchedulerManager()
- ctxt = context.RequestContext('user', 'project')
- service_list = [self._create_service_model(id=1,
- host='host1',
- zone='zone1'),
- self._create_service_model(id=2,
- host='host2',
- zone='zone2'),
- self._create_service_model(id=3,
- host='host3',
- zone='zone2'),
- self._create_service_model(id=4,
- host='host4',
- zone='zone2'),
- self._create_service_model(id=5,
- host='host5',
- zone='zone2')]
-
- request_spec = _create_request_spec(availability_zone='zone1')
-
- fake_instance = _create_instance_dict(
- **request_spec['instance_properties'])
- fake_instance['id'] = 100
- fake_instance['uuid'] = FAKE_UUID
-
- self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
- self.mox.StubOutWithMock(db, 'instance_update')
- # Assumes we're testing with MultiScheduler
- compute_sched_driver = scheduler.driver.drivers['compute']
- self.mox.StubOutWithMock(compute_sched_driver,
- 'create_instance_db_entry')
- self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
-
- arg = IgnoreArg()
- db.service_get_all_by_topic(arg, arg).AndReturn(service_list)
- compute_sched_driver.create_instance_db_entry(arg,
- request_spec).AndReturn(fake_instance)
- db.instance_update(arg, 100, {'host': 'host1', 'scheduled_at': arg})
- rpc.cast(arg,
- 'compute.host1',
- {'method': 'run_instance',
- 'args': {'instance_id': 100}})
- self.mox.ReplayAll()
- scheduler.run_instance(ctxt,
- 'compute',
- request_spec=request_spec)
-
-
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
def setUp(self):
@@ -362,6 +286,25 @@ class SimpleDriverTestCase(test.TestCase):
db.compute_node_create(self.context, dic)
return db.service_get(self.context, s_ref['id'])
+ def test_regular_user_can_schedule(self):
+ """Ensures a non-admin can run an instance"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ instance_id = _create_instance()['id']
+ ctxt = context.RequestContext('fake', 'fake', False)
+ global instance_ids
+ instance_ids = []
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+ request_spec = _create_request_spec()
+ self.scheduler.driver.schedule_run_instance(ctxt, request_spec)
+ compute1.kill()
+
def test_doesnt_report_disabled_hosts_as_up_no_queue(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
@@ -444,7 +387,7 @@ class SimpleDriverTestCase(test.TestCase):
compute2.kill()
def test_specific_host_gets_instance_no_queue(self):
- """Ensures if you set availability_zone it launches on that zone"""
+ """Ensures if you set zone:host it launches on that host"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
@@ -501,7 +444,7 @@ class SimpleDriverTestCase(test.TestCase):
'cast_to_compute_host', _fake_cast_to_compute_host)
request_spec = _create_request_spec(availability_zone='nova:host1')
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
@@ -533,6 +476,78 @@ class SimpleDriverTestCase(test.TestCase):
compute1.terminate_instance(self.context, instance_ids[0])
compute1.kill()
+ def test_specific_zone_gets_instance_no_queue(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ self.flags(node_availability_zone='zone1')
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ self.flags(node_availability_zone='zone2')
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+
+ global instance_ids
+ instance_ids = []
+ instance_ids.append(_create_instance()['id'])
+ compute1.run_instance(self.context, instance_ids[0])
+
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='zone1')
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host1')
+ self.assertEqual(len(instance_ids), 2)
+
+ compute1.terminate_instance(self.context, instance_ids[0])
+ compute1.terminate_instance(self.context, instance_ids[1])
+ compute1.kill()
+ compute2.kill()
+
+ def test_bad_instance_zone_fails(self):
+ self.flags(node_availability_zone='zone1')
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ request_spec = _create_request_spec(availability_zone='zone2')
+ try:
+ self.assertRaises(exception.NoValidHost,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ request_spec)
+ finally:
+ compute1.kill()
+
+ def test_bad_volume_zone_fails(self):
+ self.flags(node_availability_zone='zone1')
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume1.start()
+ # uses 'nova' for zone
+ volume_id = _create_volume()
+ try:
+ self.assertRaises(exception.NoValidHost,
+ self.scheduler.driver.schedule_create_volume,
+ self.context,
+ volume_id)
+ finally:
+ db.volume_destroy(self.context, volume_id)
+ volume1.kill()
+
def test_too_many_cores_no_queue(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',
@@ -555,7 +570,7 @@ class SimpleDriverTestCase(test.TestCase):
compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
request_spec = _create_request_spec()
- self.assertRaises(driver.NoValidHost,
+ self.assertRaises(exception.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
@@ -679,7 +694,7 @@ class SimpleDriverTestCase(test.TestCase):
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
request_spec = _create_request_spec(availability_zone='nova:host1')
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
@@ -734,7 +749,7 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec()
- self.assertRaises(driver.NoValidHost,
+ self.assertRaises(exception.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
@@ -780,7 +795,7 @@ class SimpleDriverTestCase(test.TestCase):
volume2.create_volume(self.context, volume_id)
volume_ids2.append(volume_id)
volume_id = _create_volume()
- self.assertRaises(driver.NoValidHost,
+ self.assertRaises(exception.NoValidHost,
self.scheduler.driver.schedule_create_volume,
self.context,
volume_id)
@@ -914,7 +929,7 @@ class SimpleDriverTestCase(test.TestCase):
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_same_host(self):
- """Confirms exceptioin raises in case dest and src is same host."""
+ """Confirms exception raises in case dest and src is same host."""
instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
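
The new zone tests above (test_specific_zone_gets_instance_no_queue, test_bad_instance_zone_fails, test_bad_volume_zone_fails) all reduce to one rule: a request pinned to an availability zone may only schedule onto hosts whose service zone matches, and it is a NoValidHost error otherwise, now raised from nova.exception rather than nova.scheduler.driver. Sketched with a local exception class standing in for nova.exception.NoValidHost:

    class NoValidHost(Exception):
        """Stand-in for nova.exception.NoValidHost."""

    def pick_host(hosts_by_zone, requested_zone):
        candidates = hosts_by_zone.get(requested_zone, [])
        if not candidates:
            raise NoValidHost(requested_zone)
        return candidates[0]

    assert pick_host({'zone1': ['host1']}, 'zone1') == 'host1'
    try:
        pick_host({'zone1': ['host1']}, 'zone2')
        raise AssertionError('expected NoValidHost')
    except NoValidHost:
        pass
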
diff --git a/nova/tests/scheduler/test_scheduler_options.py b/nova/tests/scheduler/test_scheduler_options.py
new file mode 100644
index 000000000..d99452b59
--- /dev/null
+++ b/nova/tests/scheduler/test_scheduler_options.py
@@ -0,0 +1,138 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For SchedulerOptions.
+"""
+
+import datetime
+import json
+import StringIO
+
+from nova.scheduler import scheduler_options
+from nova import test
+
+
+class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
+ def __init__(self, last_checked, now, file_old, file_now, data, filedata):
+ super(FakeSchedulerOptions, self).__init__()
+ # Change internals ...
+ self.last_modified = file_old
+ self.last_checked = last_checked
+ self.data = data
+
+ # For overrides ...
+ self._time_now = now
+ self._file_now = file_now
+ self._file_data = filedata
+
+ self.file_was_loaded = False
+
+ def _get_file_timestamp(self, filename):
+ return self._file_now
+
+ def _get_file_handle(self, filename):
+ self.file_was_loaded = True
+ return StringIO.StringIO(self._file_data)
+
+ def _get_time_now(self):
+ return self._time_now
+
+
+class SchedulerOptionsTestCase(test.TestCase):
+ def test_get_configuration_first_time_no_flag(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = json.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEquals({}, fake.get_configuration())
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_empty_file(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = ""
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEquals({}, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_happy_day(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = json.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEquals(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_no_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = json.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ data, jdata)
+ self.assertEquals(data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_too_fast(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2011, 1, 1, 1, 1, 2)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = json.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEquals(old_data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = json.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEquals(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
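
Taken together, the six cases above specify a timestamp-gated reload: the JSON options file is re-read only when a filename is supplied, the periodic check interval has elapsed since the last check, and the file's mtime is newer than the last load. A minimal class consistent with those cases; the five-minute interval and the default hook bodies are assumptions, not Nova's exact implementation (the hooks mirror the ones FakeSchedulerOptions overrides):

    import datetime
    import json
    import os

    class OptionsSketch(object):
        CHECK_INTERVAL = datetime.timedelta(minutes=5)  # assumed interval

        def __init__(self):
            self.data = {}
            self.last_checked = None
            self.last_modified = None

        def _get_time_now(self):
            return datetime.datetime.utcnow()

        def _get_file_timestamp(self, filename):
            return datetime.datetime.utcfromtimestamp(
                os.path.getmtime(filename))

        def _get_file_handle(self, filename):
            return open(filename)

        def get_configuration(self, filename=None):
            if not filename:
                return self.data          # no file given: keep the cache
            now = self._get_time_now()
            if (self.last_checked and
                    now - self.last_checked < self.CHECK_INTERVAL):
                return self.data          # checked too recently
            self.last_checked = now
            mtime = self._get_file_timestamp(filename)
            if self.last_modified is None or mtime > self.last_modified:
                raw = self._get_file_handle(filename).read()
                self.data = json.loads(raw) if raw else {}
                self.last_modified = mtime
            return self.data
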
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
index 802946e1b..9a74c9948 100644
--- a/nova/tests/scheduler/test_vsa_scheduler.py
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -28,7 +28,6 @@ from nova import utils
from nova.volume import volume_types
from nova.scheduler import vsa as vsa_sched
-from nova.scheduler import driver
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.scheduler.vsa')
@@ -274,7 +273,7 @@ class VsaSchedulerTestCase(test.TestCase):
drive_type_num=5,
init_num_drives=1)
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.NoValidHost,
self.sched.schedule_create_volumes,
self.context,
request_spec,
@@ -291,7 +290,7 @@ class VsaSchedulerTestCase(test.TestCase):
prev = self._generate_default_service_states()
request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.NoValidHost,
self.sched.schedule_create_volumes,
self.context,
request_spec,
@@ -314,7 +313,7 @@ class VsaSchedulerTestCase(test.TestCase):
self.service_states = new_states
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.NoValidHost,
self.sched.schedule_create_volumes,
self.context,
request_spec,
@@ -365,7 +364,7 @@ class VsaSchedulerTestCase(test.TestCase):
availability_zone=None)
self._print_service_states()
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.NoValidHost,
self.sched.schedule_create_volumes,
self.context,
new_request,
@@ -393,7 +392,7 @@ class VsaSchedulerTestCase(test.TestCase):
self.stubs.Set(self.sched,
'service_is_up', self._fake_service_is_up_False)
- self.assertRaises(driver.WillNotSchedule,
+ self.assertRaises(exception.WillNotSchedule,
self.sched.schedule_create_volumes,
self.context,
request_spec,
@@ -483,7 +482,7 @@ class VsaSchedulerTestCase(test.TestCase):
global_volume = {}
global_volume['volume_type_id'] = None
- self.assertRaises(driver.NoValidHost,
+ self.assertRaises(exception.NoValidHost,
self.sched.schedule_create_volume,
self.context,
123,
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index addb6084d..e6382a211 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -44,6 +44,7 @@ from nova.db.sqlalchemy import models
from nova.image import fake as fake_image
from nova.notifier import test_notifier
from nova.tests import fake_network
+from nova.network.quantum import client as quantum_client
LOG = logging.getLogger('nova.tests.compute')
@@ -96,16 +97,15 @@ def nop_report_driver_status(self):
pass
-class ComputeTestCase(test.TestCase):
- """Test case for compute"""
+class BaseTestCase(test.TestCase):
+
def setUp(self):
- super(ComputeTestCase, self).setUp()
+ super(BaseTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
- self.compute_api = compute.API()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -161,46 +161,8 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project_id}
return db.security_group_create(self.context, values)
- def test_create_instance_defaults_display_name(self):
- """Verify that an instance cannot be created without a display_name."""
- cases = [dict(), dict(display_name=None)]
- for instance in cases:
- (ref, resv_id) = self.compute_api.create(self.context,
- instance_types.get_default_instance_type(), None, **instance)
- try:
- self.assertNotEqual(ref[0]['display_name'], None)
- finally:
- db.instance_destroy(self.context, ref[0]['id'])
- def test_create_instance_associates_security_groups(self):
- """Make sure create associates security groups"""
- group = self._create_group()
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=instance_types.get_default_instance_type(),
- image_href=None,
- security_group=['testgroup'])
- try:
- self.assertEqual(len(db.security_group_get_by_instance(
- self.context, ref[0]['id'])), 1)
- group = db.security_group_get(self.context, group['id'])
- self.assert_(len(group.instances) == 1)
- finally:
- db.security_group_destroy(self.context, group['id'])
- db.instance_destroy(self.context, ref[0]['id'])
-
- def test_create_instance_with_invalid_security_group_raises(self):
- instance_type = instance_types.get_default_instance_type()
-
- pre_build_len = len(db.instance_get_all(context.get_admin_context()))
- self.assertRaises(exception.SecurityGroupNotFoundForProject,
- self.compute_api.create,
- self.context,
- instance_type=instance_type,
- image_href=None,
- security_group=['this_is_a_fake_sec_group'])
- self.assertEqual(pre_build_len,
- len(db.instance_get_all(context.get_admin_context())))
+class ComputeTestCase(BaseTestCase):
def test_create_instance_with_img_ref_associates_config_drive(self):
"""Make sure create associates a config drive."""
@@ -230,53 +192,6 @@ class ComputeTestCase(test.TestCase):
finally:
db.instance_destroy(self.context, instance_id)
- def test_default_hostname_generator(self):
- cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'),
- ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
- ('hello_server', 'hello-server')]
- for display_name, hostname in cases:
- (ref, resv_id) = self.compute_api.create(self.context,
- instance_types.get_default_instance_type(), None,
- display_name=display_name)
- try:
- self.assertEqual(ref[0]['hostname'], hostname)
- finally:
- db.instance_destroy(self.context, ref[0]['id'])
-
- def test_destroy_instance_disassociates_security_groups(self):
- """Make sure destroying disassociates security groups"""
- group = self._create_group()
-
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=instance_types.get_default_instance_type(),
- image_href=None,
- security_group=['testgroup'])
- try:
- db.instance_destroy(self.context, ref[0]['id'])
- group = db.security_group_get(self.context, group['id'])
- self.assert_(len(group.instances) == 0)
- finally:
- db.security_group_destroy(self.context, group['id'])
-
- def test_destroy_security_group_disassociates_instances(self):
- """Make sure destroying security groups disassociates instances"""
- group = self._create_group()
-
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=instance_types.get_default_instance_type(),
- image_href=None,
- security_group=['testgroup'])
-
- try:
- db.security_group_destroy(self.context, group['id'])
- group = db.security_group_get(context.get_admin_context(
- read_deleted=True), group['id'])
- self.assert_(len(group.instances) == 0)
- finally:
- db.instance_destroy(self.context, ref[0]['id'])
-
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
instance_id = self._create_instance()
@@ -342,27 +257,55 @@ class ComputeTestCase(test.TestCase):
self.compute.resume_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
- def test_soft_reboot(self):
+ def test_reboot_soft(self):
"""Ensure instance can be soft rebooted"""
instance_id = self._create_instance()
- reboot_type = "SOFT"
self.compute.run_instance(self.context, instance_id)
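+ # The API layer normally sets the REBOOTING task state before
+ # casting to the manager, so emulate it here since the manager
+ # is called directly.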
+ db.instance_update(self.context, instance_id,
+ {'task_state': task_states.REBOOTING})
+
+ reboot_type = "SOFT"
self.compute.reboot_instance(self.context, instance_id, reboot_type)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
+ self.assertEqual(inst_ref['task_state'], None)
+
self.compute.terminate_instance(self.context, instance_id)
- def test_hard_reboot(self):
+ def test_reboot_hard(self):
"""Ensure instance can be hard rebooted"""
instance_id = self._create_instance()
- reboot_type = "HARD"
self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id,
+ {'task_state': task_states.REBOOTING_HARD})
+
+ reboot_type = "HARD"
self.compute.reboot_instance(self.context, instance_id, reboot_type)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
+ self.assertEqual(inst_ref['task_state'], None)
+
self.compute.terminate_instance(self.context, instance_id)
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id,
+ {'task_state': task_states.UPDATING_PASSWORD})
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
+
self.compute.set_admin_password(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], None)
+
self.compute.terminate_instance(self.context, instance_id)
def test_inject_file(self):
@@ -389,36 +332,6 @@ class ComputeTestCase(test.TestCase):
self.compute.snapshot_instance(self.context, instance_id, name)
self.compute.terminate_instance(self.context, instance_id)
- def test_snapshot_conflict_backup(self):
- """Can't backup an instance which is already being backed up."""
- instance_id = self._create_instance()
- instance_values = {'task_state': task_states.IMAGE_BACKUP}
- db.instance_update(self.context, instance_id, instance_values)
-
- self.assertRaises(exception.InstanceBackingUp,
- self.compute_api.backup,
- self.context,
- instance_id,
- None,
- None,
- None)
-
- db.instance_destroy(self.context, instance_id)
-
- def test_snapshot_conflict_snapshot(self):
- """Can't snapshot an instance which is already being snapshotted."""
- instance_id = self._create_instance()
- instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
- db.instance_update(self.context, instance_id, instance_values)
-
- self.assertRaises(exception.InstanceSnapshotting,
- self.compute_api.snapshot,
- self.context,
- instance_id,
- None)
-
- db.instance_destroy(self.context, instance_id)
-
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance_id = self._create_instance()
@@ -507,9 +420,11 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertEquals(payload['state'], 'active')
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
+ self.assertTrue(payload['launched_at'])
self.assertEquals(payload['image_ref'], '1')
self.compute.terminate_instance(self.context, instance_id)
@@ -551,6 +466,50 @@ class ComputeTestCase(test.TestCase):
instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_instance_set_to_error_on_uncaught_exception(self):
+ """Test that instance is set to error state when exception is raised"""
+ instance_id = self._create_instance()
+
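+ # Stub network allocation to raise, so that run_instance
+ # propagates the error and the instance ends up in ERROR state.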
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False).\
+ AndRaise(quantum_client.QuantumServerException())
+
+ FLAGS.stub_network = False
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(quantum_client.QuantumServerException,
+ self.compute.run_instance,
+ self.context,
+ instance_id)
+
+ instances = db.instance_get_all(context.get_admin_context())
+ self.assertEqual(vm_states.ERROR, instances[0]['vm_state'])
+
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_network_is_deallocated_on_spawn_failure(self):
+ """When a spawn fails the network must be deallocated"""
+ instance_id = self._create_instance()
+
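+ # Fail the spawn at block device setup; run_instance should
+ # deallocate the network before re-raising the error.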
+ self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
+ self.compute._setup_block_device_mapping(mox.IgnoreArg(),
+ mox.IgnoreArg()).\
+ AndRaise(rpc.common.RemoteError('', '', ''))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(rpc.common.RemoteError,
+ self.compute.run_instance,
+ self.context,
+ instance_id)
+
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_lock(self):
"""ensure locked instance cannot be changed"""
instance_id = self._create_instance()
@@ -641,44 +600,6 @@ class ComputeTestCase(test.TestCase):
migration_ref['id'])
self.compute.terminate_instance(context, instance_id)
- def test_resize_invalid_flavor_fails(self):
- """Ensure invalid flavors raise"""
- instance_id = self._create_instance()
- context = self.context.elevated()
- self.compute.run_instance(self.context, instance_id)
-
- self.assertRaises(exception.NotFound, self.compute_api.resize,
- context, instance_id, 200)
-
- self.compute.terminate_instance(context, instance_id)
-
- def test_resize_down_fails(self):
- """Ensure resizing down raises and fails"""
- context = self.context.elevated()
- instance_id = self._create_instance()
-
- self.compute.run_instance(self.context, instance_id)
- inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
- db.instance_update(self.context, instance_id,
- {'instance_type_id': inst_type['id']})
-
- self.assertRaises(exception.CannotResizeToSmallerSize,
- self.compute_api.resize, context, instance_id, 1)
-
- self.compute.terminate_instance(context, instance_id)
-
- def test_resize_same_size_fails(self):
- """Ensure invalid flavors raise"""
- context = self.context.elevated()
- instance_id = self._create_instance()
-
- self.compute.run_instance(self.context, instance_id)
-
- self.assertRaises(exception.CannotResizeToSameSize,
- self.compute_api.resize, context, instance_id, 1)
-
- self.compute.terminate_instance(context, instance_id)
-
def test_finish_revert_resize(self):
"""Ensure that the flavor is reverted to the original on revert"""
context = self.context.elevated()
@@ -746,12 +667,28 @@ class ComputeTestCase(test.TestCase):
self.context, inst_ref['uuid'], 1)
self.compute.terminate_instance(self.context, instance_id)
- def test_migrate(self):
- context = self.context.elevated()
+ def test_resize_instance_handles_migration_error(self):
+ """Ensure vm_state is ERROR when MigrationError occurs"""
+ def raise_migration_failure(*args):
+ raise exception.MigrationError(reason='test failure')
+ self.stubs.Set(self.compute.driver,
+ 'migrate_disk_and_power_off',
+ raise_migration_failure)
+
instance_id = self._create_instance()
+ context = self.context.elevated()
+ inst_ref = db.instance_get(context, instance_id)
+
self.compute.run_instance(self.context, instance_id)
- # Migrate simply calls resize() without a flavor_id.
- self.compute_api.resize(context, instance_id, None)
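+ # Pretend the instance lives on another host so prep_resize
+ # does not reject an in-place (same host) migration.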
+ db.instance_update(self.context, inst_ref['uuid'], {'host': 'foo'})
+ self.compute.prep_resize(context, inst_ref['uuid'], 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ inst_ref['uuid'], 'pre-migrating')
+ self.compute.resize_instance(context,
+ inst_ref['uuid'],
+ migration_ref['id'])
+ inst_ref = db.instance_get(context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(context, instance_id)
def _setup_other_managers(self):
@@ -938,6 +875,535 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(len(instances), 1)
self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
+
+class ComputeAPITestCase(BaseTestCase):
+
+ def setUp(self):
+ super(ComputeAPITestCase, self).setUp()
+ self.compute_api = compute.API()
+ self.fake_image = {
+ 'id': 1,
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1},
+ }
+
+ def test_create_with_too_little_ram(self):
+ """Test an instance type with too little memory"""
+
+ inst_type = instance_types.get_default_instance_type()
+ inst_type['memory_mb'] = 1
+
+ def fake_show(*args):
+ img = copy(self.fake_image)
+ img['min_ram'] = 2
+ return img
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ self.assertRaises(exception.InstanceTypeMemoryTooSmall,
+ self.compute_api.create, self.context, inst_type, None)
+
+ # Now increase the inst_type memory and make sure all is fine.
+ inst_type['memory_mb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ def test_create_with_too_little_disk(self):
+ """Test an instance type with too little disk space"""
+
+ inst_type = instance_types.get_default_instance_type()
+ inst_type['local_gb'] = 1
+
+ def fake_show(*args):
+ img = copy(self.fake_image)
+ img['min_disk'] = 2
+ return img
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ self.assertRaises(exception.InstanceTypeDiskTooSmall,
+ self.compute_api.create, self.context, inst_type, None)
+
+ # Now increase the inst_type disk space and make sure all is fine.
+ inst_type['local_gb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ def test_create_just_enough_ram_and_disk(self):
+ """Test an instance type with just enough ram and disk space"""
+
+ inst_type = instance_types.get_default_instance_type()
+ inst_type['local_gb'] = 2
+ inst_type['memory_mb'] = 2
+
+ def fake_show(*args):
+ img = copy(self.fake_image)
+ img['min_ram'] = 2
+ img['min_disk'] = 2
+ return img
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ def test_create_with_no_ram_and_disk_reqs(self):
+ """Test an instance type with no min_ram or min_disk"""
+
+ inst_type = instance_types.get_default_instance_type()
+ inst_type['local_gb'] = 1
+ inst_type['memory_mb'] = 1
+
+ def fake_show(*args):
+ return copy(self.fake_image)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ def test_create_instance_defaults_display_name(self):
+ """Verify that an instance cannot be created without a display_name."""
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None, **instance)
+ try:
+ self.assertNotEqual(ref[0]['display_name'], None)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_create_instance_associates_security_groups(self):
+ """Make sure create associates security groups"""
+ group = self._create_group()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_types.get_default_instance_type(),
+ image_href=None,
+ security_group=['testgroup'])
+ try:
+ self.assertEqual(len(db.security_group_get_by_instance(
+ self.context, ref[0]['id'])), 1)
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 1)
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_create_instance_with_invalid_security_group_raises(self):
+ instance_type = instance_types.get_default_instance_type()
+
+ pre_build_len = len(db.instance_get_all(context.get_admin_context()))
+ self.assertRaises(exception.SecurityGroupNotFoundForProject,
+ self.compute_api.create,
+ self.context,
+ instance_type=instance_type,
+ image_href=None,
+ security_group=['this_is_a_fake_sec_group'])
+ self.assertEqual(pre_build_len,
+ len(db.instance_get_all(context.get_admin_context())))
+
+ def test_default_hostname_generator(self):
+ cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
+ ('hello_server', 'hello-server')]
+ for display_name, hostname in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ display_name=display_name)
+ try:
+ self.assertEqual(ref[0]['hostname'], hostname)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_destroy_instance_disassociates_security_groups(self):
+ """Make sure destroying disassociates security groups"""
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_types.get_default_instance_type(),
+ image_href=None,
+ security_group=['testgroup'])
+ try:
+ db.instance_destroy(self.context, ref[0]['id'])
+ group = db.security_group_get(self.context, group['id'])
+ self.assert_(len(group.instances) == 0)
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+
+ def test_destroy_security_group_disassociates_instances(self):
+ """Make sure destroying security groups disassociates instances"""
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_types.get_default_instance_type(),
+ image_href=None,
+ security_group=['testgroup'])
+
+ try:
+ db.security_group_destroy(self.context, group['id'])
+ group = db.security_group_get(context.get_admin_context(
+ read_deleted=True), group['id'])
+ self.assert_(len(group.instances) == 0)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_start(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ self.compute.stop_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], None)
+
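+ # The cast to the compute manager is stubbed out in these tests,
+ # so only the intermediate STARTING task state can be asserted.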
+ self.compute_api.start(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.STARTING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_stop(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], None)
+
+ self.compute_api.stop(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.STOPPING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_delete(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], None)
+
+ self.compute_api.delete(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.DELETING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_delete_soft(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], None)
+
+ self.compute_api.soft_delete(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_force_delete(self):
+ """Ensure instance can be soft rebooted"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.compute_api.soft_delete(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
+
+ self.compute_api.force_delete(self.context, instance)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.DELETING)
+
+ def test_suspend(self):
+ """Ensure instance can be suspended"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute_api.suspend(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.SUSPENDING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_resume(self):
+ """Ensure instance can be resumed"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute_api.resume(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.RESUMING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_pause(self):
+ """Ensure instance can be paused"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute_api.pause(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.PAUSING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_unpause(self):
+ """Ensure instance can be unpaused"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute.pause_instance(self.context, instance_id)
+
+ self.compute_api.unpause(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.UNPAUSING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_rebuild(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], None)
+
+ image_ref = instance["image_ref"]
+ password = "new_password"
+ self.compute_api.rebuild(self.context, instance, image_ref, password)
+
+ instance = db.instance_get(self.context, instance_id)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_reboot_soft(self):
+ """Ensure instance can be soft rebooted"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ reboot_type = "SOFT"
+ self.compute_api.reboot(self.context, inst_ref, reboot_type)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.REBOOTING)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_reboot_hard(self):
+ """Ensure instance can be hard rebooted"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ reboot_type = "HARD"
+ self.compute_api.reboot(self.context, inst_ref, reboot_type)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['task_state'], task_states.REBOOTING_HARD)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_set_admin_password(self):
+ """Ensure instance can have its admin password set"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute_api.set_admin_password(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
+
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_rescue_unrescue(self):
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], None)
+
+ self.compute_api.rescue(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(inst_ref['task_state'], task_states.RESCUING)
+
+ params = {'vm_state': vm_states.RESCUED, 'task_state': None}
+ db.instance_update(self.context, instance_id, params)
+
+ self.compute_api.unrescue(self.context, inst_ref)
+
+ inst_ref = db.instance_get(self.context, instance_id)
+ self.assertEqual(inst_ref['vm_state'], vm_states.RESCUED)
+ self.assertEqual(inst_ref['task_state'], task_states.UNRESCUING)
+
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_snapshot(self):
+ """Can't backup an instance which is already being backed up."""
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ self.compute_api.snapshot(self.context, instance, None, None)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_backup(self):
+ """Can't backup an instance which is already being backed up."""
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ self.compute_api.backup(self.context, instance, None, None, None)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_backup_conflict(self):
+ """Can't backup an instance which is already being backed up."""
+ instance_id = self._create_instance()
+ instance_values = {'task_state': task_states.IMAGE_BACKUP}
+ db.instance_update(self.context, instance_id, instance_values)
+ instance = self.compute_api.get(self.context, instance_id)
+
+ self.assertRaises(exception.InstanceBackingUp,
+ self.compute_api.backup,
+ self.context,
+ instance,
+ None,
+ None,
+ None)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_snapshot_conflict(self):
+ """Can't snapshot an instance which is already being snapshotted."""
+ instance_id = self._create_instance()
+ instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
+ db.instance_update(self.context, instance_id, instance_values)
+ instance = self.compute_api.get(self.context, instance_id)
+
+ self.assertRaises(exception.InstanceSnapshotting,
+ self.compute_api.snapshot,
+ self.context,
+ instance,
+ None)
+
+ db.instance_destroy(self.context, instance_id)
+
+ def test_resize_confirm_through_api(self):
+ """Ensure invalid flavors raise"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ instance = db.instance_get(context, instance_id)
+ self.compute.run_instance(self.context, instance_id)
+ self.compute_api.resize(context, instance, '4')
+
+ # create a fake migration record (manager does this)
+ migration_ref = db.migration_create(context,
+ {'instance_uuid': instance['uuid'],
+ 'status': 'finished'})
+
+ self.compute_api.confirm_resize(context, instance)
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_revert_through_api(self):
+ """Ensure invalid flavors raise"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ instance = db.instance_get(context, instance_id)
+ self.compute.run_instance(self.context, instance_id)
+
+ self.compute_api.resize(context, instance, '4')
+
+ # create a fake migration record (manager does this)
+ migration_ref = db.migration_create(context,
+ {'instance_uuid': instance['uuid'],
+ 'status': 'finished'})
+
+ self.compute_api.revert_resize(context, instance)
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_invalid_flavor_fails(self):
+ """Ensure invalid flavors raise"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ instance = db.instance_get(context, instance_id)
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.NotFound, self.compute_api.resize,
+ context, instance, 200)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_down_fails(self):
+ """Ensure resizing down raises and fails"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+ inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
+ db.instance_update(self.context, instance_id,
+ {'instance_type_id': inst_type['id']})
+
+ instance = db.instance_get(context, instance_id)
+ self.assertRaises(exception.CannotResizeToSmallerSize,
+ self.compute_api.resize, context, instance, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_same_size_fails(self):
+ """Ensure invalid flavors raise"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+ instance = db.instance_get(context, instance_id)
+
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.CannotResizeToSameSize,
+ self.compute_api.resize, context, instance, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_migrate(self):
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+ instance = db.instance_get(context, instance_id)
+ self.compute.run_instance(self.context, instance_id)
+ # Migrate simply calls resize() without a flavor_id.
+ self.compute_api.resize(context, instance, None)
+ self.compute.terminate_instance(context, instance_id)
+
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
c = context.get_admin_context()
@@ -1251,6 +1717,32 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(c, instance_id3)
db.instance_destroy(c, instance_id4)
+ def test_instance_metadata(self):
+ """Test searching instances by state"""
+ _context = context.get_admin_context()
+ instance_id = self._create_instance({'metadata': {'key1': 'value1'}})
+ instance = self.compute_api.get(_context, instance_id)
+
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key1': 'value1'})
+
+ self.compute_api.update_instance_metadata(_context, instance,
+ {'key2': 'value2'})
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
+
+ new_metadata = {'key2': 'bah', 'key3': 'value3'}
+ self.compute_api.update_instance_metadata(_context, instance,
+ new_metadata, delete=True)
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, new_metadata)
+
+ self.compute_api.delete_instance_metadata(_context, instance, 'key2')
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key3': 'value3'})
+
+ db.instance_destroy(_context, instance_id)
+
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
@@ -1409,25 +1901,6 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, instance['id'])
- def test_reservation_ids_two_instances_no_wait(self):
- """Verify building 2 instances at once without waiting for
- instance IDs results in a reservation_id being returned equal
- to reservation id set in both instances
- """
- (refs, resv_id) = self.compute_api.create(self.context,
- instance_types.get_default_instance_type(), None,
- min_count=2, max_count=2, wait_for_instances=False)
- try:
- self.assertEqual(refs, None)
- self.assertNotEqual(resv_id, None)
- finally:
- instances = self.compute_api.get_all(self.context,
- search_opts={'reservation_id': resv_id})
- self.assertEqual(len(instances), 2)
- for instance in instances:
- self.assertEqual(instance['reservation_id'], resv_id)
- db.instance_destroy(self.context, instance['id'])
-
def test_create_with_specified_reservation_id(self):
"""Verify building instances with a specified
reservation_id results in the correct reservation_id
@@ -1488,89 +1961,46 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(i_ref['name'], i_ref['uuid'])
db.instance_destroy(self.context, i_ref['id'])
+ def test_add_remove_fixed_ip(self):
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ self.compute_api.add_fixed_ip(self.context, instance, '1')
+ self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1')
-class ComputeTestMinRamMinDisk(test.TestCase):
- def setUp(self):
- super(ComputeTestMinRamMinDisk, self).setUp()
- self.compute = utils.import_object(FLAGS.compute_manager)
- self.compute_api = compute.API()
- self.context = context.RequestContext('fake', 'fake')
- self.stubs.Set(rpc, 'call', rpc_call_wrapper)
- self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
- self.fake_image = {
- 'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
-
- def test_create_with_too_little_ram(self):
- """Test an instance type with too little memory"""
-
- inst_type = instance_types.get_default_instance_type()
- inst_type['memory_mb'] = 1
-
- def fake_show(*args):
- img = copy(self.fake_image)
- img['min_ram'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-
- self.assertRaises(exception.InstanceTypeMemoryTooSmall,
- self.compute_api.create, self.context, inst_type, None)
-
- # Now increase the inst_type memory and make sure all is fine.
- inst_type['memory_mb'] = 2
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
- db.instance_destroy(self.context, refs[0]['id'])
-
- def test_create_with_too_little_disk(self):
- """Test an instance type with too little disk space"""
-
- inst_type = instance_types.get_default_instance_type()
- inst_type['local_gb'] = 1
-
- def fake_show(*args):
- img = copy(self.fake_image)
- img['min_disk'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-
- self.assertRaises(exception.InstanceTypeDiskTooSmall,
- self.compute_api.create, self.context, inst_type, None)
-
- # Now increase the inst_type disk space and make sure all is fine.
- inst_type['local_gb'] = 2
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
- db.instance_destroy(self.context, refs[0]['id'])
-
- def test_create_just_enough_ram_and_disk(self):
- """Test an instance type with just enough ram and disk space"""
+ def test_vnc_console(self):
+ """Make sure we can a vnc console for an instance."""
+ def vnc_rpc_call_wrapper(*args, **kwargs):
+ return {'token': 'asdf', 'host': '0.0.0.0', 'port': 8080}
- inst_type = instance_types.get_default_instance_type()
- inst_type['local_gb'] = 2
- inst_type['memory_mb'] = 2
+ self.stubs.Set(rpc, 'call', vnc_rpc_call_wrapper)
- def fake_show(*args):
- img = copy(self.fake_image)
- img['min_ram'] = 2
- img['min_disk'] = 2
- return img
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ console = self.compute_api.get_vnc_console(self.context, instance)
+ self.compute_api.delete(self.context, instance)
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
- db.instance_destroy(self.context, refs[0]['id'])
+ def test_ajax_console(self):
+ """Make sure we can a vnc console for an instance."""
+ def ajax_rpc_call_wrapper(*args, **kwargs):
+ return {'token': 'asdf', 'host': '0.0.0.0', 'port': 8080}
- def test_create_with_no_ram_and_disk_reqs(self):
- """Test an instance type with no min_ram or min_disk"""
+ self.stubs.Set(rpc, 'call', ajax_rpc_call_wrapper)
- inst_type = instance_types.get_default_instance_type()
- inst_type['local_gb'] = 1
- inst_type['memory_mb'] = 1
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ console = self.compute_api.get_ajax_console(self.context, instance)
+ self.compute_api.delete(self.context, instance)
- def fake_show(*args):
- return copy(self.fake_image)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ def test_console_output(self):
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ console = self.compute_api.get_console_output(self.context, instance)
+ self.compute_api.delete(self.context, instance)
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, None)
- db.instance_destroy(self.context, refs[0]['id'])
+ def test_inject_file(self):
+ """Ensure we can write a file to an instance"""
+ instance_id = self._create_instance()
+ instance = self.compute_api.get(self.context, instance_id)
+ self.compute_api.inject_file(self.context, instance,
+ "/tmp/test", "File Contents")
+ db.instance_destroy(self.context, instance_id)
diff --git a/nova/tests/test_compute_utils.py b/nova/tests/test_compute_utils.py
index 5efb10166..12b98aa0b 100644
--- a/nova/tests/test_compute_utils.py
+++ b/nova/tests/test_compute_utils.py
@@ -19,11 +19,7 @@
Tests For misc util methods used with compute.
"""
-from datetime import datetime
-from datetime import timedelta
-
from nova import db
-from nova import exception
from nova import flags
from nova import context
from nova import test
diff --git a/nova/tests/test_image.py b/nova/tests/test_image.py
index 9eeefe0e5..0cb138956 100644
--- a/nova/tests/test_image.py
+++ b/nova/tests/test_image.py
@@ -16,6 +16,7 @@
# under the License.
import datetime
+import StringIO
from nova import context
from nova import exception
@@ -128,6 +129,16 @@ class _ImageTestCase(test.TestCase):
index = self.image_service.index(self.context)
self.assertEquals(len(index), 0)
+ def test_create_then_get(self):
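+ # Round-trip image data: write the blob through create() and
+ # read it back through get() to check it survives intact.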
+ blob = 'some data'
+ s1 = StringIO.StringIO(blob)
+ self.image_service.create(self.context,
+ {'id': '32', 'foo': 'bar'},
+ data=s1)
+ s2 = StringIO.StringIO()
+ self.image_service.get(self.context, '32', data=s2)
+ self.assertEquals(s2.getvalue(), blob, 'Did not get blob back intact')
+
class FakeImageTestCase(_ImageTestCase):
def setUp(self):
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index b7864c9dd..6420aa01c 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -43,7 +43,14 @@ from nova.virt.libvirt import volume
from nova.volume import driver as volume_driver
from nova.tests import fake_network
-libvirt = None
+
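+# libvirt is optional here: tests that need it are skipped via
+# missing_libvirt() when the import fails.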
+try:
+ import libvirt
+ connection.libvirt = libvirt
+except ImportError:
+ libvirt = None
+
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.test_libvirt')
@@ -228,6 +235,10 @@ class FakeVolumeDriver(object):
return ""
+def missing_libvirt():
+ return libvirt is None
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@@ -248,24 +259,10 @@ class LibvirtConnTestCase(test.TestCase):
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
- 'image_ref': '123456',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'local_gb': 20,
'instance_type_id': '5'} # m1.small
- def lazy_load_library_exists(self):
- """check if libvirt is available."""
- # try to connect libvirt. if fail, skip test.
- try:
- import libvirt
- import libxml2
- except ImportError:
- return False
- global libvirt
- libvirt = __import__('libvirt')
- connection.libvirt = __import__('libvirt')
- connection.libxml2 = __import__('libxml2')
- return True
-
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtConnection(libvirt is not used)."""
@@ -352,19 +349,16 @@ class LibvirtConnTestCase(test.TestCase):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_ami_format(self):
- if not self.lazy_load_library_exists():
- return
-
self.flags(image_service='nova.image.fake.FakeImageService')
# Start test
image_service = utils.import_object(FLAGS.image_service)
- # Assign image_ref = 3 from nova/images/fakes for testing
- # ami image
+ # Assign a different image_ref from nova/images/fakes to test ami
test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = "3"
+ test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
@@ -393,10 +387,8 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_raw_format(self):
- if not self.lazy_load_library_exists():
- return
-
self.flags(image_service='nova.image.fake.FakeImageService')
# Start test
@@ -429,10 +421,8 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_in_qcow2_format(self):
- if not self.lazy_load_library_exists():
- return
-
self.flags(image_service='nova.image.fake.FakeImageService')
self.flags(snapshot_image_format='qcow2')
@@ -466,19 +456,17 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_snapshot_no_image_architecture(self):
- if not self.lazy_load_library_exists():
- return
-
self.flags(image_service='nova.image.fake.FakeImageService')
# Start test
image_service = utils.import_object(FLAGS.image_service)
- # Assign image_ref = 2 from nova/images/fakes for testing different
- # base image
+ # Assign a different image_ref from nova/images/fakes to
+ # test a different base image
test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = "2"
+ test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
@@ -733,12 +721,9 @@ class LibvirtConnTestCase(test.TestCase):
conn.update_available_resource,
self.context, 'dummy')
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_ensure_filtering_rules_for_instance_timeout(self):
"""ensure_filtering_fules_for_instance() finishes with timeout."""
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
# Preparing mocks
def fake_none(self, *args):
return
@@ -787,12 +772,9 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(self.context, instance_ref['id'])
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_live_migration_raises_exception(self):
"""Confirms recover method is called when exceptions are raised."""
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
# Preparing data
self.compute = utils.import_object(FLAGS.compute_manager)
instance_dict = {'host': 'fake',
@@ -861,13 +843,9 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertEqual(conn.pre_live_migration(vol), None)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_pre_block_migration_works_correctly(self):
"""Confirms pre_block_migration works correctly."""
-
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
# Replace instances_path since this testcase creates tmpfile
tmpdir = tempfile.mkdtemp()
store = FLAGS.instances_path
@@ -898,12 +876,9 @@ class LibvirtConnTestCase(test.TestCase):
# Restore FLAGS.instances_path
FLAGS.instances_path = store
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_get_instance_disk_info_works_correctly(self):
"""Confirms pre_block_migration works correctly."""
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
@@ -956,11 +931,8 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(self.context, instance_ref['id'])
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_spawn_with_network_info(self):
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
# Preparing mocks
def fake_none(self, instance):
return
@@ -970,7 +942,10 @@ class LibvirtConnTestCase(test.TestCase):
# create_fake_libvirt_mock() mocks utils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock()
- instance = db.instance_create(self.context, self.test_instance)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance = db.instance_create(self.context, instance_ref)
# Start test
self.mox.ReplayAll()
@@ -983,8 +958,11 @@ class LibvirtConnTestCase(test.TestCase):
fake_none)
try:
- conn.spawn(self.context, instance, network_info)
+ conn.spawn(self.context, instance, None, network_info)
except Exception, e:
+ # assert that no exception is raised due to sha1 receiving an int
+ self.assertEqual(-1, str(e.message).find('must be string or buffer'
+ ', not int'))
count = (0 <= str(e.message).find('Unexpected method call'))
shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
@@ -1075,6 +1053,96 @@ class LibvirtConnTestCase(test.TestCase):
compute_driver = driver.ComputeDriver()
self.assertRaises(NotImplementedError, compute_driver.reboot, *args)
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
+ def test_immediate_delete(self):
+ conn = connection.LibvirtConnection(False)
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn.lookupByName = lambda x: None
+
+ instance = db.instance_create(self.context, self.test_instance)
+ conn.destroy(instance, {})
+
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
+ def test_destroy_saved(self):
+ """Ensure destroy calls managedSaveRemove for saved instance"""
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.destroy()
+ mock.hasManagedSaveImage(0).AndReturn(1)
+ mock.managedSaveRemove(0)
+ mock.undefine()
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ conn = connection.LibvirtConnection(False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ instance = {"name": "instancename", "id": "instanceid"}
+ conn.destroy(instance, [])
+
+
+class HostStateTestCase(test.TestCase):
+
+ cpu_info = '{"vendor": "Intel", "model": "pentium", "arch": "i686", '\
+ '"features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", '\
+ '"clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", '\
+ '"apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"}}'
+
+ class FakeConnection(object):
+ """Fake connection object"""
+
+ def get_vcpu_total(self):
+ return 1
+
+ def get_vcpu_used(self):
+ return 0
+
+ def get_cpu_info(self):
+ return HostStateTestCase.cpu_info
+
+ def get_local_gb_total(self):
+ return 100
+
+ def get_local_gb_used(self):
+ return 20
+
+ def get_memory_mb_total(self):
+ return 497
+
+ def get_memory_mb_used(self):
+ return 88
+
+ def get_hypervisor_type(self):
+ return 'QEMU'
+
+ def get_hypervisor_version(self):
+ return 13091
+
+ def test_update_status(self):
+ self.mox.StubOutWithMock(connection, 'get_connection')
+ connection.get_connection(True).AndReturn(self.FakeConnection())
+
+ self.mox.ReplayAll()
+ hs = connection.HostState(True)
+ stats = hs._stats
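+ # disk_available and host_memory_free are derived values:
+ # total minus used (100 - 20 = 80 GB, 497 - 88 = 409 MB).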
+ self.assertEquals(stats["vcpus"], 1)
+ self.assertEquals(stats["vcpus_used"], 0)
+ self.assertEquals(stats["cpu_info"], \
+ {"vendor": "Intel", "model": "pentium", "arch": "i686",
+ "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr",
+ "clflush", "pse36", "pat", "cmov", "mca", "pge",
+ "mtrr", "sep", "apic"],
+ "topology": {"cores": "1", "threads": "1", "sockets": "1"}
+ })
+ self.assertEquals(stats["disk_total"], 100)
+ self.assertEquals(stats["disk_used"], 20)
+ self.assertEquals(stats["disk_available"], 80)
+ self.assertEquals(stats["host_memory_total"], 497)
+ self.assertEquals(stats["host_memory_free"], 409)
+ self.assertEquals(stats["hypervisor_type"], 'QEMU')
+ self.assertEquals(stats["hypervisor_version"], 13091)
+
class NWFilterFakes:
def __init__(self):
@@ -1118,20 +1186,6 @@ class IptablesFirewallTestCase(test.TestCase):
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
- def lazy_load_library_exists(self):
- """check if libvirt is available."""
- # try to connect libvirt. if fail, skip test.
- try:
- import libvirt
- import libxml2
- except ImportError:
- return False
- global libvirt
- libvirt = __import__('libvirt')
- connection.libvirt = __import__('libvirt')
- connection.libxml2 = __import__('libxml2')
- return True
-
in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
@@ -1355,11 +1409,8 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
+ @test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_unfilter_instance_undefines_nwfilter(self):
- # Skip if non-libvirt environment
- if not self.lazy_load_library_exists():
- return
-
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 17e79f39a..3b043e793 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,7 +20,6 @@ from nova import context
from nova import db
from nova import exception
from nova import log as logging
-from nova import quota
from nova import rpc
from nova import test
from nova.network import manager as network_manager
@@ -201,6 +200,17 @@ class FlatNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
+ def test_validate_reserved(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ nets = self.network.create_networks(context_admin, 'fake',
+ '192.168.0.0/24', False, 1,
+ 256, None, None, None, None, None)
+ self.assertEqual(1, len(nets))
+ network = nets[0]
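+ # Three addresses are reserved: the network address, the
+ # gateway and the broadcast address.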
+ self.assertEqual(3, db.network_count_reserved_ips(context_admin,
+ network['id']))
+
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
@@ -289,6 +299,7 @@ class VlanNetworkTestCase(test.TestCase):
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
+ mox.IgnoreArg(),
reserved=True).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
@@ -301,6 +312,19 @@ class VlanNetworkTestCase(test.TestCase):
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(None, 0, network, vpn=True)
+ def test_vpn_allocate_fixed_ip_no_network_id(self):
+ network = dict(networks[0])
+ network['vpn_private_address'] = '192.168.0.2'
+ network['id'] = None
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.FixedIpNotFoundForNetwork,
+ self.network.allocate_fixed_ip,
+ context_admin,
+ 0,
+ network,
+ vpn=True)
+
def test_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
@@ -438,7 +462,7 @@ class VlanNetworkTestCase(test.TestCase):
# this time should raise
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
ctxt,
ctxt.project_id)
@@ -686,7 +710,7 @@ class CommonNetworkTestCase(test.TestCase):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 1, 256, None, None, None,
- None)
+ None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
@@ -695,7 +719,7 @@ class CommonNetworkTestCase(test.TestCase):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
- None)
+ None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
@@ -710,7 +734,7 @@ class CommonNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
- None)
+ None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
@@ -729,7 +753,7 @@ class CommonNetworkTestCase(test.TestCase):
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
- None, None)
+ None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
@@ -740,7 +764,8 @@ class CommonNetworkTestCase(test.TestCase):
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
- False, 4, 256, None, None, None, None)
+ False, 4, 256, None, None, None, None,
+ None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
@@ -757,7 +782,8 @@ class CommonNetworkTestCase(test.TestCase):
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
- False, 3, 32, None, None, None, None)
+ False, 3, 32, None, None, None, None,
+ None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
@@ -775,7 +801,7 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
- None, None)
+ None, None, None)
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
@@ -784,7 +810,7 @@ class CommonNetworkTestCase(test.TestCase):
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
- None, None)
+ None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@@ -797,13 +823,13 @@ class CommonNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
# ValueError: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
- None, None)
+ None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
- None, None)
+ None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
@@ -811,7 +837,8 @@ class CommonNetworkTestCase(test.TestCase):
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
- False, 2, 256, None, None, None, None)
+ False, 2, 256, None, None, None, None,
+ None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
@@ -824,7 +851,7 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
- None, None)
+ None, None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager.create_networks, *args)
@@ -835,7 +862,7 @@ class CommonNetworkTestCase(test.TestCase):
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
- None]
+ None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
@@ -846,7 +873,7 @@ class CommonNetworkTestCase(test.TestCase):
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
- 'fd00::/48', None, None, None]
+ 'fd00::/48', None, None, None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
@@ -855,7 +882,7 @@ class CommonNetworkTestCase(test.TestCase):
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
- None]
+ None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index 520bfbea1..5e124413f 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -108,11 +108,17 @@ class NetworkCommandsTestCase(test.TestCase):
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(self.fake_net)
+ def fake_network_get_by_uuid(context, uuid):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(uuid, self.fake_net['uuid'])
+ return db_fakes.FakeModel(self.fake_net)
+
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
+ self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def tearDown(self):
@@ -131,6 +137,7 @@ class NetworkCommandsTestCase(test.TestCase):
self.assertEqual(kwargs['vlan_start'], 200)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
+ self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
@@ -149,11 +156,13 @@ class NetworkCommandsTestCase(test.TestCase):
vlan_start=200,
vpn_start=2000,
fixed_range_v6='fd00:2::/120',
+ gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
- dns2='8.8.4.4')
+ dns2='8.8.4.4',
+ uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
@@ -193,6 +202,19 @@ class NetworkCommandsTestCase(test.TestCase):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_uuid',
+ self.fake_network_get_by_uuid)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(uuid=self.fake_net['uuid'])
+
+ def test_delete_by_cidr(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py
index 1a199131d..d5cd09f37 100644
--- a/nova/tests/test_quantum.py
+++ b/nova/tests/test_quantum.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import stubout
+
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -23,8 +25,12 @@ from nova import exception
from nova import ipv6
from nova import log as logging
from nova.network.quantum import manager as quantum_manager
+from nova.network.quantum import melange_connection
from nova import test
from nova import utils
+from nova.network import manager
+
+import mox
LOG = logging.getLogger('nova.tests.quantum_network')
@@ -41,7 +47,7 @@ class FakeQuantumClientConnection(object):
for net_id, n in self.nets.items():
if n['tenant-id'] == tenant_id:
net_ids.append(net_id)
- return net_ids
+ return {'networks': net_ids}
def create_network(self, tenant_id, network_name):
@@ -90,14 +96,22 @@ class FakeQuantumClientConnection(object):
"for tenant %(tenant_id)s" % locals()))
del self.nets[net_id]['ports'][port_id]
- def get_port_by_attachment(self, tenant_id, attachment_id):
- for net_id, n in self.nets.items():
- if n['tenant-id'] == tenant_id:
+ def get_port_by_attachment(self, tenant_id, net_id, attachment_id):
+ for nid, n in self.nets.items():
+ if nid == net_id and n['tenant-id'] == tenant_id:
for port_id, p in n['ports'].items():
if p['attachment-id'] == attachment_id:
- return (net_id, port_id)
+ return port_id
+ return None
+
+ def get_networks(self, tenant_id):
+ nets = []
+ for nid, n in self.nets.items():
+ if n['tenant-id'] == tenant_id:
+ x = {'id': nid}
+ nets.append(x)
+ return {'networks': nets}
- return (None, None)
networks = [{'label': 'project1-net1',
'injected': False,
@@ -175,28 +189,65 @@ networks = [{'label': 'project1-net1',
# this is a base class to be used by all other Quantum Test classes
-class QuantumTestCaseBase(object):
+class QuantumNovaTestCase(test.TestCase):
+ def setUp(self):
+ super(QuantumNovaTestCase, self).setUp()
+
+ self.net_man = quantum_manager.QuantumManager(
+ ipam_lib="nova.network.quantum.nova_ipam_lib",
+ q_conn=FakeQuantumClientConnection())
+
+ # Tests seem to create some networks by default, which
+ # we don't want. So we delete them.
+
+ ctx = context.RequestContext('user1', 'fake_project1').elevated()
+ for n in db.network_get_all(ctx):
+ db.network_delete_safe(ctx, n['id'])
+ # Other unit tests (e.g., test_compute.py) have a nasty
+        # habit of creating fixed IPs and not cleaning up, which
+ # can confuse these tests, so we remove all existing fixed
+ # ips before starting.
+ session = get_session()
+ result = session.query(models.FixedIp).all()
+ with session.begin():
+ for fip_ref in result:
+ session.delete(fip_ref)
+
+ def _create_network(self, n):
+ ctx = context.RequestContext('user1', n['project_id'])
+ nwks = self.net_man.create_networks(
+ ctx,
+ label=n['label'], cidr=n['cidr'],
+ multi_host=n['multi_host'],
+ num_networks=1, network_size=256,
+ cidr_v6=n['cidr_v6'],
+ gateway=n['gateway'],
+ gateway_v6=n['gateway_v6'], bridge=None,
+ bridge_interface=None, dns1=n['dns1'],
+ dns2=n['dns2'],
+ project_id=n['project_id'],
+ priority=n['priority'])
+ n['uuid'] = nwks[0]['uuid']
+
+
+class QuantumNovaIPAMTestCase(QuantumNovaTestCase):
def test_create_and_delete_nets(self):
self._create_nets()
self._delete_nets()
def _create_nets(self):
for n in networks:
- ctx = context.RequestContext('user1', n['project_id'])
- self.net_man.create_networks(ctx,
- label=n['label'], cidr=n['cidr'],
- multi_host=n['multi_host'],
- num_networks=1, network_size=256, cidr_v6=n['cidr_v6'],
- gateway_v6=n['gateway_v6'], bridge=None,
- bridge_interface=None, dns1=n['dns1'],
- dns2=n['dns2'], project_id=n['project_id'],
- priority=n['priority'])
+ self._create_network(n)
def _delete_nets(self):
for n in networks:
ctx = context.RequestContext('user1', n['project_id'])
- self.net_man.delete_network(ctx, n['cidr'])
+ db_nets = db.network_get_all(ctx.elevated())
+ for x in db_nets:
+ if x['label'] == n['label']:
+ n['uuid'] = x['uuid']
+ self.net_man.delete_network(ctx, None, n['uuid'])
def test_allocate_and_deallocate_instance_static(self):
self._create_nets()
@@ -206,6 +257,16 @@ class QuantumTestCaseBase(object):
instance_ref = db.instance_create(ctx,
{"project_id": project_id})
+
+ def func(arg1, arg2):
+ pass
+
+ def func1(arg1):
+ pass
+
+ self.net_man.driver.update_dhcp_hostfile_with_text = func
+ self.net_man.driver.restart_dhcp = func
+ self.net_man.driver.kill_dhcp = func1
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
instance_type_id=instance_ref['instance_type_id'],
@@ -245,12 +306,23 @@ class QuantumTestCaseBase(object):
ctx = context.RequestContext('user1', project_id)
net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
- requested_networks = [(net_id, None) for net_id in net_ids]
+ requested_networks = [(net_id, None) for net_id in
+ net_ids['networks']]
self.net_man.validate_networks(ctx, requested_networks)
instance_ref = db.instance_create(ctx,
{"project_id": project_id})
+
+ def func(arg1, arg2):
+ pass
+
+ def func1(arg1):
+ pass
+
+ self.net_man.driver.update_dhcp_hostfile_with_text = func
+ self.net_man.driver.restart_dhcp = func
+ self.net_man.driver.kill_dhcp = func1
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
instance_type_id=instance_ref['instance_type_id'],
@@ -299,28 +371,45 @@ class QuantumTestCaseBase(object):
self.net_man.validate_networks, ctx, [("", None)])
-class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase):
+class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
+ def test_local_mac_address_creation(self):
+ self.flags(use_melange_mac_generation=False)
+ fake_mac = "ab:cd:ef:ab:cd:ef"
+ self.stubs.Set(manager.FlatManager, "generate_mac_address",
+ lambda x: fake_mac)
+ project_id = "fake_project1"
+ ctx = context.RequestContext('user1', project_id)
+ self._create_network(networks[0])
- def setUp(self):
- super(QuantumNovaIPAMTestCase, self).setUp()
+ net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
+ requested_networks = [(net_id, None) for net_id in net_ids['networks']]
- self.net_man = quantum_manager.QuantumManager(
- ipam_lib="nova.network.quantum.nova_ipam_lib",
- q_conn=FakeQuantumClientConnection())
+ instance_ref = db.api.instance_create(ctx,
+ {"project_id": project_id})
+ nw_info = self.net_man.allocate_for_instance(ctx,
+ instance_id=instance_ref['id'], host="",
+ instance_type_id=instance_ref['instance_type_id'],
+ project_id=project_id,
+ requested_networks=requested_networks)
+ self.assertEqual(nw_info[0][1]['mac'], fake_mac)
- # Tests seem to create some networks by default, which
- # we don't want. So we delete them.
+ def test_melange_mac_address_creation(self):
+ self.flags(use_melange_mac_generation=True)
+ fake_mac = "ab:cd:ef:ab:cd:ef"
+ self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
+ lambda w, x, y, z: fake_mac)
+ project_id = "fake_project1"
+ ctx = context.RequestContext('user1', project_id)
+ self._create_network(networks[0])
- ctx = context.RequestContext('user1', 'fake_project1').elevated()
- for n in db.network_get_all(ctx):
- db.network_delete_safe(ctx, n['id'])
+ net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
+ requested_networks = [(net_id, None) for net_id in net_ids['networks']]
- # Other unit tests (e.g., test_compute.py) have a nasty
- # habit of of creating fixed IPs and not cleaning up, which
- # can confuse these tests, so we remove all existing fixed
- # ips before starting.
- session = get_session()
- result = session.query(models.FixedIp).all()
- with session.begin():
- for fip_ref in result:
- session.delete(fip_ref)
+ instance_ref = db.api.instance_create(ctx,
+ {"project_id": project_id})
+ nw_info = self.net_man.allocate_for_instance(ctx,
+ instance_id=instance_ref['id'], host="",
+ instance_type_id=instance_ref['instance_type_id'],
+ project_id=project_id,
+ requested_networks=requested_networks)
+ self.assertEqual(nw_info[0][1]['mac'], fake_mac)
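A note on the stub lambdas in these MAC-generation tests: stubs.Set replaces the method on the class itself, so the replacement receives self as its first positional argument, which is why the lambdas above take an extra leading parameter (the x/w slots). A self-contained toy with a hypothetical class, same lambda shape:

class FakeMelange(object):
    def create_vif(self, vif_id, instance_id, project_id):
        return 'computed:mac'

# Rebinding on the class means the replacement gets 'self' as its
# first argument, just like the create_vif stub in the test above.
FakeMelange.create_vif = lambda w, x, y, z: 'ab:cd:ef:ab:cd:ef'
print FakeMelange().create_vif('vif-1', 'inst-1', 'proj-1')  # ab:cd:ef:ab:cd:ef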
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 6b39adab2..449f37572 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -21,6 +21,7 @@ from nova import context
from nova import db
from nova import flags
from nova import quota
+from nova import exception
from nova import rpc
from nova import test
from nova import volume
@@ -219,7 +220,7 @@ class QuotaTestCase(test.TestCase):
instance_ids.append(instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.assertRaises(quota.QuotaError, compute.API().create,
+ self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
@@ -234,7 +235,7 @@ class QuotaTestCase(test.TestCase):
instance_ids.append(instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.assertRaises(quota.QuotaError, compute.API().create,
+ self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
@@ -248,7 +249,7 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
volume.API().create,
self.context,
size=10,
@@ -262,7 +263,7 @@ class QuotaTestCase(test.TestCase):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
volume.API().create,
self.context,
size=10,
@@ -277,7 +278,7 @@ class QuotaTestCase(test.TestCase):
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
@@ -289,7 +290,7 @@ class QuotaTestCase(test.TestCase):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.assertRaises(quota.QuotaError, compute.API().create,
+ self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
@@ -367,7 +368,7 @@ class QuotaTestCase(test.TestCase):
files = []
for i in xrange(FLAGS.quota_max_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
@@ -380,7 +381,7 @@ class QuotaTestCase(test.TestCase):
max = FLAGS.quota_max_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_allowed_injected_file_path_bytes(self):
@@ -398,5 +399,5 @@ class QuotaTestCase(test.TestCase):
max = FLAGS.quota_max_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
- self.assertRaises(quota.QuotaError,
+ self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 704e362fd..9dacda4b3 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -74,6 +74,12 @@ class VMWareAPIVMTestCase(test.TestCase):
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
+ self.image = {
+ 'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
+ 'disk_format': 'vhd',
+ 'size': 512,
+ }
+
def tearDown(self):
super(VMWareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
@@ -95,7 +101,8 @@ class VMWareAPIVMTestCase(test.TestCase):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.instance_type_get_by_name(None, 'm1.large')
- self.conn.spawn(self.context, self.instance, self.network_info)
+ self.conn.spawn(self.context, self.instance, self.image,
+ self.network_info)
self._check_vm_record()
def _check_vm_record(self):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index c807186e1..b09bacc42 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -134,21 +134,6 @@ class XenAPIVolumeTestCase(test.TestCase):
}
}
- def test_create_iscsi_storage(self):
- """This shows how to test helper classes' methods."""
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
- helper = volume_utils.VolumeHelper
- helper.XenAPI = session.get_imported_xenapi()
- vol = self._create_volume()
- info = helper.parse_volume_info(self._make_info(), '/dev/sdc')
- label = 'SR-%s' % vol['id']
- description = 'Test-SR'
- sr_ref = helper.create_iscsi_storage(session, info, label, description)
- srs = xenapi_fake.get_all('SR')
- self.assertEqual(sr_ref, srs[0])
- db.volume_destroy(context.get_admin_context(), vol['id'])
-
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
@@ -438,7 +423,9 @@ class XenAPIVMTestCase(test.TestCase):
if empty_dns:
network_info[0][1]['dns'] = []
- self.conn.spawn(self.context, instance, network_info)
+ image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
+ 'disk_format': 'vhd'}
+ self.conn.spawn(self.context, instance, image_meta, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
@@ -709,8 +696,10 @@ class XenAPIVMTestCase(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
+ image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
+ 'disk_format': 'vhd'}
if spawn:
- self.conn.spawn(self.context, instance, network_info)
+ self.conn.spawn(self.context, instance, image_meta, network_info)
return instance
@@ -800,12 +789,42 @@ class XenAPIMigrateInstance(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
+ def test_resize_xenserver_6(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ called = {'resize': False}
+
+ def fake_vdi_resize(*args, **kwargs):
+ called['resize'] = True
+
+ self.stubs.Set(stubs.FakeSessionForMigrationTests,
+ "VDI_resize", fake_vdi_resize)
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForMigrationTests,
+ product_version=(6, 0, 0))
+ stubs.stubout_loopingcall_start(self.stubs)
+ conn = xenapi_conn.get_connection(False)
+ conn._vmops.resize_instance(instance, '')
+ self.assertEqual(called['resize'], True)
+
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(self.context, instance, '127.0.0.1')
+ def test_migrate_disk_and_power_off_passes_exceptions(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+
+ def fake_raise(*args, **kwargs):
+ raise exception.MigrationError(reason='test failure')
+ self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
+
+ conn = xenapi_conn.get_connection(False)
+ self.assertRaises(exception.MigrationError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance, '127.0.0.1')
+
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
@@ -844,9 +863,10 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
+ image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, resize_instance=True)
+ network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
@@ -885,9 +905,10 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
+ image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, resize_instance=True)
+ network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
@@ -920,9 +941,10 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
+ image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, resize_instance=True)
+ network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.instance_values)
@@ -951,9 +973,10 @@ class XenAPIMigrateInstance(test.TestCase):
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
+ image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, resize_instance=False)
+ network_info, image_meta, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
@@ -988,8 +1011,9 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def assert_disk_type(self, disk_type):
ctx = context.RequestContext('fake', 'fake')
- dt = vm_utils.VMHelper.determine_disk_image_type(
- self.fake_instance, ctx)
+ fake_glance = glance_stubs.FakeGlance('')
+ image_meta = fake_glance.get_image_meta(self.fake_instance.image_ref)
+ dt = vm_utils.VMHelper.determine_disk_image_type(image_meta)
self.assertEqual(disk_type, dt)
def test_instance_disk(self):
@@ -1132,7 +1156,9 @@ class XenAPIManagedDiskTestCase(test.TestCase):
bootable=True):
pass
- self.stubs.Set(vm_utils.VMHelper, "create_vbd", fake_create_vbd)
+ self.stubs.Set(volume_utils.VolumeHelper,
+ "create_vbd",
+ fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index c79bda682..51ca78708 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -57,8 +57,8 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
-def stubout_session(stubs, cls):
- """Stubs out two methods from XenAPISession"""
+def stubout_session(stubs, cls, product_version=None):
+ """Stubs out three methods from XenAPISession"""
def fake_import(self):
"""Stubs out get_imported_xenapi of XenAPISession"""
fake_module = 'nova.virt.xenapi.fake'
@@ -69,6 +69,10 @@ def stubout_session(stubs, cls):
lambda s, url: cls(url))
stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
fake_import)
+ if product_version is None:
+ product_version = (5, 6, 2)
+ stubs.Set(xenapi_conn.XenAPISession, 'get_product_version',
+ lambda s: product_version)
def stub_out_get_target(stubs):
diff --git a/nova/utils.py b/nova/utils.py
index 9a01a6fb8..a30d90ff1 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -872,7 +872,7 @@ def gen_uuid():
def is_uuid_like(val):
- """For our purposes, a UUID is a string in canoical form:
+ """For our purposes, a UUID is a string in canonical form:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
@@ -910,7 +910,7 @@ def is_valid_ipv4(address):
def monkey_patch():
""" If the Flags.monkey_patch set as True,
- this functuion patches a decorator
+ this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using FLAGS.monkey_patch_modules.
@@ -979,21 +979,40 @@ def generate_glance_url():
@contextlib.contextmanager
-def original_exception_raised():
- """Run some code, then re-raise the original exception.
+def save_and_reraise_exception():
+ """Save current exception, run some code and then re-raise.
- This is needed because when Eventlet switches greenthreads, it clears the
- exception context. This means if exception handler code blocks, we'll lose
- the helpful exception traceback information.
+    In some cases the exception context can be cleared, resulting in None
+    being re-raised after an exception handler has run. This can happen
+    when eventlet switches greenthreads, or when code inside an exception
+    handler raises and catches an exception of its own. In both cases the
+    exception context will be cleared.
To work around this, we save the exception state, run handler code, and
- then re-raise the original exception.
+ then re-raise the original exception. If another exception occurs, the
+    saved exception is logged and the new exception is re-raised.
"""
type_, value, traceback = sys.exc_info()
try:
yield
- finally:
- raise type_, value, traceback
+ except:
+ LOG.exception(_('Original exception being dropped'),
+ exc_info=(type_, value, traceback))
+ raise
+ raise type_, value, traceback
+
+
+@contextlib.contextmanager
+def logging_error(message):
+ """Catches exception, write message to the log, re-raise.
+ This is a common refinement of save_and_reraise that writes a specific
+ message to the log.
+ """
+ try:
+ yield
+ except Exception as error:
+ with save_and_reraise_exception():
+ LOG.exception(message)
def make_dev_path(dev, partition=None, base='/dev'):
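A short usage sketch for save_and_reraise_exception (hypothetical helpers; only the context manager is from the patch). Cleanup runs inside the manager; if the cleanup itself raises, the original exception is logged instead of silently lost while the new one propagates:

from nova import utils

def _unplug(vbd_ref):            # hypothetical operation that fails
    raise ValueError('unplug failed for %s' % vbd_ref)

def _destroy(vbd_ref):           # hypothetical cleanup that also fails
    raise IOError('destroy failed for %s' % vbd_ref)

try:
    _unplug('vbd-1')
except Exception:
    with utils.save_and_reraise_exception():
        # The IOError raised here propagates; the original ValueError
        # is written to the log rather than dropped.
        _destroy('vbd-1')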
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 6bd83fb4d..c2056f19d 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -87,9 +87,9 @@ class ComputeDriver(object):
virtual network interface, and these IDs are opaque to the rest of Nova.
Some methods here take an instance of nova.compute.service.Instance. This
- is the datastructure used by nova.compute to store details regarding an
+ is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
- translating that generic datastructure into terms that are specific to the
+ translating that generic data structure into terms that are specific to the
virtualization platform.
"""
@@ -127,7 +127,7 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def spawn(self, context, instance,
+ def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -143,6 +143,8 @@ class ComputeDriver(object):
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
+ :param image_meta: image object returned by nova.image.glance that
+ defines the image from which to boot this instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
@@ -253,11 +255,14 @@ class ComputeDriver(object):
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
- network_info, resize_instance):
+ network_info, image_meta, resize_instance):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+ :param image_meta: image object returned by nova.image.glance that
+ defines the image from which this instance
+ was created
"""
raise NotImplementedError()
@@ -360,7 +365,7 @@ class ComputeDriver(object):
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
- It is the responsiblity of this method to make sure any rules
+ It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
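To make the widened signatures concrete, here is a minimal hypothetical driver written against the interface above (a sketch, not code from the patch). Because compute now fetches the glance metadata once and threads it through, a driver can branch on disk_format without its own glance round-trip:

from nova import log as logging
from nova.virt import driver

LOG = logging.getLogger('nova.virt.noop')


class NoopDriver(driver.ComputeDriver):
    """Hypothetical driver honouring the image_meta-aware interface."""

    def spawn(self, context, instance, image_meta,
              network_info=None, block_device_info=None):
        # image_meta is the glance image dict threaded through by
        # compute, e.g. {'id': ..., 'disk_format': 'vhd'}.
        LOG.debug('booting %s from a %s image' %
                  (instance['name'], image_meta['disk_format']))

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance):
        LOG.debug('finishing migration of %s from image %s' %
                  (instance['name'], image_meta['id']))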
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index ad40297a3..d4a554c81 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -97,7 +97,7 @@ class FakeConnection(driver.ComputeDriver):
"""Plugin VIFs into networks."""
pass
- def spawn(self, context, instance,
+ def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
name = instance.name
state = power_state.RUNNING
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 16fd94e7f..34c784e7a 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -138,7 +138,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, context, instance,
+ def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index f648af733..fc03ea67b 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -74,7 +74,6 @@ from nova.virt.libvirt import netutils
libvirt = None
-libxml2 = None
Template = None
@@ -155,11 +154,8 @@ def get_connection(read_only):
# Cheetah is separate because the unit tests want to load Cheetah,
# but not libvirt.
global libvirt
- global libxml2
if libvirt is None:
libvirt = __import__('libvirt')
- if libxml2 is None:
- libxml2 = __import__('libxml2')
_late_load_cheetah()
return LibvirtConnection(read_only)
@@ -196,6 +192,12 @@ class LibvirtConnection(driver.ComputeDriver):
driver_class = utils.import_class(driver)
self.volume_drivers[driver_type] = driver_class(self)
+ @property
+ def host_state(self):
+ if not self._host_state:
+            self._host_state = HostState(self.read_only)
+ return self._host_state
+
def init_host(self, host):
# NOTE(nsokolov): moved instance restarting to ComputeManager
pass
@@ -304,18 +306,29 @@ class LibvirtConnection(driver.ComputeDriver):
locals())
raise
- try:
- # NOTE(justinsb): We remove the domain definition. We probably
- # would do better to keep it if cleanup=False (e.g. volumes?)
- # (e.g. #2 - not losing machines on failure)
- virt_dom.undefine()
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- LOG.warning(_("Error from libvirt during undefine of "
- "%(instance_name)s. Code=%(errcode)s "
- "Error=%(e)s") %
- locals())
- raise
+ try:
+ # NOTE(derekh): we can switch to undefineFlags and
+ # VIR_DOMAIN_UNDEFINE_MANAGED_SAVE once we require 0.9.4
+ if virt_dom.hasManagedSaveImage(0):
+ virt_dom.managedSaveRemove(0)
+ except libvirt.libvirtError as e:
+ errcode = e.get_error_code()
+ LOG.warning(_("Error from libvirt during saved instance "
+ "removal %(instance_name)s. Code=%(errcode)s"
+ " Error=%(e)s") % locals())
+
+ try:
+ # NOTE(justinsb): We remove the domain definition. We probably
+ # would do better to keep it if cleanup=False (e.g. volumes?)
+ # (e.g. #2 - not losing machines on failure)
+ virt_dom.undefine()
+ except libvirt.libvirtError as e:
+ errcode = e.get_error_code()
+ LOG.warning(_("Error from libvirt during undefine of "
+ "%(instance_name)s. Code=%(errcode)s "
+ "Error=%(e)s") %
+ locals())
+ raise
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, network, mapping)
@@ -382,22 +395,15 @@ class LibvirtConnection(driver.ComputeDriver):
def _get_disk_xml(self, xml, device):
"""Returns the xml for the disk mounted at device"""
try:
- doc = libxml2.parseDoc(xml)
+ doc = ElementTree.fromstring(xml)
except Exception:
return None
- ctx = doc.xpathNewContext()
- try:
- ret = ctx.xpathEval('/domain/devices/disk')
- for node in ret:
- for child in node.children:
- if child.name == 'target':
- if child.prop('dev') == device:
- return str(node)
- finally:
- if ctx is not None:
- ctx.xpathFreeContext()
- if doc is not None:
- doc.freeDoc()
+ ret = doc.findall('./devices/disk')
+ for node in ret:
+ for child in node.getchildren():
+ if child.tag == 'target':
+ if child.get('dev') == device:
+ return ElementTree.tostring(node)
@exception.wrap_exception()
def detach_volume(self, connection_info, instance_name, mountpoint):
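The libxml2-to-ElementTree conversion above is easiest to see against a toy domain document (illustrative XML, not a real libvirt dump):

from xml.etree import ElementTree

xml = ("<domain><devices>"
       "<disk type='file'>"
       "<source file='/var/lib/nova/instances/i-1/disk'/>"
       "<target dev='vda' bus='virtio'/>"
       "</disk>"
       "</devices></domain>")

doc = ElementTree.fromstring(xml)
for node in doc.findall('./devices/disk'):
    for child in node.getchildren():
        if child.tag == 'target' and child.get('dev') == 'vda':
            print ElementTree.tostring(node)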
@@ -635,7 +641,7 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception()
- def spawn(self, context, instance, network_info,
+ def spawn(self, context, instance, image_meta, network_info,
block_device_info=None):
xml = self.to_xml(instance, network_info, False,
block_device_info=block_device_info)
@@ -744,7 +750,7 @@ class LibvirtConnection(driver.ComputeDriver):
token = str(uuid.uuid4())
host = instance['host']
- ajaxterm_cmd = 'sudo socat - %s' \
+ ajaxterm_cmd = 'sudo netcat - %s' \
% get_pty_for_instance(instance['name'])
cmd = ['%s/tools/ajaxterm/ajaxterm.py' % utils.novadir(),
@@ -890,7 +896,7 @@ class LibvirtConnection(driver.ComputeDriver):
user_id=inst['user_id'],
project_id=inst['project_id'])
- root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
+ root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
size = FLAGS.minimum_root_size
inst_type_id = inst['instance_type_id']
@@ -1268,37 +1274,29 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
- # TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
try:
- doc = libxml2.parseDoc(xml)
+ doc = ElementTree.fromstring(xml)
except Exception:
return []
- ctx = doc.xpathNewContext()
disks = []
- try:
- ret = ctx.xpathEval('/domain/devices/disk')
+ ret = doc.findall('./devices/disk')
- for node in ret:
- devdst = None
+ for node in ret:
+ devdst = None
- for child in node.children:
- if child.name == 'target':
- devdst = child.prop('dev')
+            for child in node.getchildren():
+                if child.tag == 'target':
+                    devdst = child.get('dev')
- if devdst is None:
- continue
+ if devdst is None:
+ continue
- disks.append(devdst)
- finally:
- if ctx is not None:
- ctx.xpathFreeContext()
- if doc is not None:
- doc.freeDoc()
+ disks.append(devdst)
return disks
@@ -1309,37 +1307,29 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all network interfaces for this instance.
"""
domain = self._lookup_by_name(instance_name)
- # TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
try:
- doc = libxml2.parseDoc(xml)
+ doc = ElementTree.fromstring(xml)
except Exception:
return []
- ctx = doc.xpathNewContext()
interfaces = []
- try:
- ret = ctx.xpathEval('/domain/devices/interface')
+ ret = doc.findall('./devices/interface')
- for node in ret:
- devdst = None
+ for node in ret:
+ devdst = None
- for child in node.children:
- if child.name == 'target':
- devdst = child.prop('dev')
+            for child in node.getchildren():
+                if child.tag == 'target':
+                    devdst = child.get('dev')
- if devdst is None:
- continue
+ if devdst is None:
+ continue
- interfaces.append(devdst)
- finally:
- if ctx is not None:
- ctx.xpathFreeContext()
- if doc is not None:
- doc.freeDoc()
+ interfaces.append(devdst)
return interfaces
@@ -1471,8 +1461,8 @@ class LibvirtConnection(driver.ComputeDriver):
"""
xml = self._conn.getCapabilities()
- xml = libxml2.parseDoc(xml)
- nodes = xml.xpathEval('//host/cpu')
+ xml = ElementTree.fromstring(xml)
+ nodes = xml.findall('.//host/cpu')
if len(nodes) != 1:
reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
reason += xml.serialize()
@@ -1480,38 +1470,36 @@ class LibvirtConnection(driver.ComputeDriver):
cpu_info = dict()
- arch_nodes = xml.xpathEval('//host/cpu/arch')
+ arch_nodes = xml.findall('.//host/cpu/arch')
if arch_nodes:
- cpu_info['arch'] = arch_nodes[0].getContent()
+ cpu_info['arch'] = arch_nodes[0].text
- model_nodes = xml.xpathEval('//host/cpu/model')
+ model_nodes = xml.findall('.//host/cpu/model')
if model_nodes:
- cpu_info['model'] = model_nodes[0].getContent()
+ cpu_info['model'] = model_nodes[0].text
- vendor_nodes = xml.xpathEval('//host/cpu/vendor')
+ vendor_nodes = xml.findall('.//host/cpu/vendor')
if vendor_nodes:
- cpu_info['vendor'] = vendor_nodes[0].getContent()
+ cpu_info['vendor'] = vendor_nodes[0].text
- topology_nodes = xml.xpathEval('//host/cpu/topology')
+ topology_nodes = xml.findall('.//host/cpu/topology')
topology = dict()
if topology_nodes:
- topology_node = topology_nodes[0].get_properties()
- while topology_node:
- name = topology_node.get_name()
- topology[name] = topology_node.getContent()
- topology_node = topology_node.get_next()
+ topology_node = topology_nodes[0]
keys = ['cores', 'sockets', 'threads']
- tkeys = topology.keys()
+ tkeys = topology_node.keys()
if set(tkeys) != set(keys):
ks = ', '.join(keys)
reason = _("topology (%(topology)s) must have %(ks)s")
raise exception.InvalidCPUInfo(reason=reason % locals())
+ for key in keys:
+ topology[key] = topology_node.get(key)
- feature_nodes = xml.xpathEval('//host/cpu/feature')
+ feature_nodes = xml.findall('.//host/cpu/feature')
features = list()
for nodes in feature_nodes:
- features.append(nodes.get_properties().getContent())
+ features.append(nodes.get('name'))
cpu_info['topology'] = topology
cpu_info['features'] = features
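Likewise for get_cpu_info: attributes come from Element.get()/keys() and element content from .text. A toy capabilities document (values illustrative):

from xml.etree import ElementTree

caps = ElementTree.fromstring(
    "<capabilities><host><cpu>"
    "<arch>x86_64</arch><model>Opteron_G3</model>"
    "<topology sockets='1' cores='2' threads='2'/>"
    "<feature name='ibs'/><feature name='osvw'/>"
    "</cpu></host></capabilities>")

cpu = caps.findall('.//host/cpu')[0]
print {'arch': cpu.find('arch').text,
       'model': cpu.find('model').text,
       'topology': dict(cpu.find('topology').items()),
       'features': [f.get('name') for f in cpu.findall('feature')]}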
@@ -1872,26 +1860,21 @@ class LibvirtConnection(driver.ComputeDriver):
virt_dom = self._lookup_by_name(instance_ref.name)
xml = virt_dom.XMLDesc(0)
- doc = libxml2.parseDoc(xml)
- disk_nodes = doc.xpathEval('//devices/disk')
- path_nodes = doc.xpathEval('//devices/disk/source')
- driver_nodes = doc.xpathEval('//devices/disk/driver')
+ doc = ElementTree.fromstring(xml)
+ disk_nodes = doc.findall('.//devices/disk')
+ path_nodes = doc.findall('.//devices/disk/source')
+ driver_nodes = doc.findall('.//devices/disk/driver')
for cnt, path_node in enumerate(path_nodes):
- disk_type = disk_nodes[cnt].get_properties().getContent()
- path = path_node.get_properties().getContent()
+ disk_type = disk_nodes[cnt].get('type')
+ path = path_node.get('file')
if disk_type != 'file':
LOG.debug(_('skipping %(path)s since it looks like volume') %
locals())
continue
- # In case of libvirt.xml, disk type can be obtained
- # by the below statement.
- # -> disk_type = driver_nodes[cnt].get_properties().getContent()
- # but this xml is generated by kvm, format is slightly different.
- disk_type = \
- driver_nodes[cnt].get_properties().get_next().getContent()
+ disk_type = driver_nodes[cnt].get('type')
if disk_type == 'raw':
size = int(os.path.getsize(path))
backing_file = ""
@@ -1930,12 +1913,18 @@ class LibvirtConnection(driver.ComputeDriver):
network_info=network_info)
def update_host_status(self):
- """See xenapi_conn.py implementation."""
- pass
+ """Retrieve status info from libvirt.
+
+ Query libvirt to get the state of the compute node, such
+ as memory and disk usage.
+ """
+ return self.host_state.update_status()
def get_host_stats(self, refresh=False):
- """See xenapi_conn.py implementation."""
- pass
+ """Return the current state of the host.
+
+        If 'refresh' is True, update the stats first."""
+ return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
@@ -1944,3 +1933,41 @@ class LibvirtConnection(driver.ComputeDriver):
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
+
+
+class HostState(object):
+ """Manages information about the compute node through libvirt"""
+ def __init__(self, read_only):
+ super(HostState, self).__init__()
+ self.read_only = read_only
+ self._stats = {}
+ self.connection = None
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host.
+
+        If 'refresh' is True, update the stats first."""
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Retrieve status info from libvirt."""
+ LOG.debug(_("Updating host stats"))
+ if self.connection is None:
+ self.connection = get_connection(self.read_only)
+ data = {}
+ data["vcpus"] = self.connection.get_vcpu_total()
+ data["vcpus_used"] = self.connection.get_vcpu_used()
+ data["cpu_info"] = utils.loads(self.connection.get_cpu_info())
+ data["disk_total"] = self.connection.get_local_gb_total()
+ data["disk_used"] = self.connection.get_local_gb_used()
+ data["disk_available"] = data["disk_total"] - data["disk_used"]
+ data["host_memory_total"] = self.connection.get_memory_mb_total()
+ data["host_memory_free"] = data["host_memory_total"] - \
+ self.connection.get_memory_mb_used()
+ data["hypervisor_type"] = self.connection.get_hypervisor_type()
+ data["hypervisor_version"] = self.connection.get_hypervisor_version()
+
+ self._stats = data
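A sketch of consuming the new libvirt host stats (toy code; get_connection and the stat keys are as defined in this file):

conn = get_connection(read_only=True)
stats = conn.get_host_stats(refresh=True)
print 'free memory: %s MB' % stats['host_memory_free']
print 'free disk: %s GB' % stats['disk_available']
print 'hypervisor: %s %s' % (stats['hypervisor_type'],
                             stats['hypervisor_version'])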
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 077c32474..96cf2e77d 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -107,8 +107,17 @@ class LibvirtOpenVswitchDriver(VIFDriver):
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
if not linux_net._device_exists(dev):
- utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
+            # Older versions of the 'ip' command from the iproute2 package
+            # don't support the tuntap option (lp:882568). If it turns out
+            # we're on an old version, we work around this by falling back
+            # to tunctl.
+ try:
+ # First, try with 'ip'
+ utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True)
+ except exception.ProcessExecutionError:
+ # Second option: tunctl
+ utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
FLAGS.libvirt_ovs_bridge, dev,
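Condensed, the fallback above amounts to the following (same nova helpers as in the patch; the device name handling is illustrative):

from nova import exception
from nova import utils

def ensure_tap_device(dev):
    # Prefer 'ip tuntap'; fall back to tunctl on older iproute2
    # releases that lack the tuntap subcommand (lp:882568).
    try:
        utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
                      run_as_root=True)
    except exception.ProcessExecutionError:
        utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
    utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)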
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index b4cb7f037..3206bab71 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -79,7 +79,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, context, instance, network_info):
+ def spawn(self, context, instance, image_meta, network_info):
"""
Creates a VM instance.
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index bef8b00f8..4d6e9d5fb 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,10 +124,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, context, instance, network_info,
+ def spawn(self, context, instance, image_meta, network_info,
block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(context, instance, network_info)
+ self._vmops.spawn(context, instance, image_meta, network_info)
def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 475ed543c..59ea19fd0 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -144,6 +144,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
'xenstore_data': '',
'sm_config': {},
'physical_utilisation': '123',
+ 'managed': True,
'VBDs': {}})
@@ -352,6 +353,65 @@ class SessionBase(object):
rec['currently_attached'] = False
rec['device'] = ''
+ def PBD_create(self, _1, pbd_rec):
+ pbd_ref = _create_object('PBD', pbd_rec)
+ _db_content['PBD'][pbd_ref]['currently_attached'] = False
+ return pbd_ref
+
+ def PBD_plug(self, _1, pbd_ref):
+ rec = get_record('PBD', pbd_ref)
+ if rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_ATTACHED', pbd_ref])
+ rec['currently_attached'] = True
+ sr_ref = rec['SR']
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+
+ def PBD_unplug(self, _1, pbd_ref):
+ rec = get_record('PBD', pbd_ref)
+ if not rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_DETACHED', pbd_ref])
+        rec['currently_attached'] = False
+        sr_ref = rec['SR']
+ _db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
+
+ def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
+ shared, sm_config):
+ host_ref = _db_content['host'].keys()[0]
+
+ ref = None
+ rec = None
+ for ref, rec in _db_content['SR'].iteritems():
+ if rec.get('uuid') == sr_uuid:
+ break
+        if rec and rec.get('uuid') == sr_uuid:
+ # make forgotten = 0 and return ref
+ _db_content['SR'][ref]['forgotten'] = 0
+ return ref
+ else:
+ # SR not found in db, so we create one
+ params = {}
+ params.update(locals())
+ del params['self']
+ sr_ref = _create_object('SR', params)
+ _db_content['SR'][sr_ref]['uuid'] = sr_uuid
+ _db_content['SR'][sr_ref]['forgotten'] = 0
+            vdi_per_lun = False
+            if type in ('iscsi',):
+                # Just to be clear
+                vdi_per_lun = True
+            if vdi_per_lun:
+ # we need to create a vdi because this introduce
+ # is likely meant for a single vdi
+ vdi_ref = create_vdi('', False, sr_ref, False)
+ _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
+ _db_content['VDI'][vdi_ref]['SR'] = sr_ref
+ return sr_ref
+
+ def SR_forget(self, _1, sr_ref):
+ _db_content['SR'][sr_ref]['forgotten'] = 1
+
+ def SR_scan(self, _1, sr_ref):
+ return
+
def PIF_get_all_records_where(self, _1, _2):
# TODO (salvatore-orlando): filter table on _2
return _db_content['PIF']
@@ -381,6 +441,8 @@ class SessionBase(object):
def VDI_resize_online(self, *args):
return 'derp'
+ VDI_resize = VDI_resize_online
+
def VM_clean_reboot(self, *args):
return 'burp'
@@ -573,7 +635,7 @@ class SessionBase(object):
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
- raise Failure(['HANDLE_INVALID', 'session', self._session])
+ raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 7142d0457..2c394ce64 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -45,7 +45,7 @@ from nova.compute import power_state
from nova.virt import disk
from nova.virt import images
from nova.virt.xenapi import HelperBase
-from nova.virt.xenapi.volume_utils import StorageError
+from nova.virt.xenapi import volume_utils
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
@@ -171,9 +171,6 @@ class VMHelper(HelperBase):
'VCPUs_params': {},
'xenstore_data': {}}
- if instance_type.get("vcpu_weight"):
- rec["VCPUs_params"]["weight"] = instance_type["vcpu_weight"]
-
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
@@ -210,30 +207,6 @@ class VMHelper(HelperBase):
return host_free_mem >= mem
@classmethod
- def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
- """Create a VBD record. Returns a Deferred that gives the new
- VBD reference."""
- vbd_rec = {}
- vbd_rec['VM'] = vm_ref
- vbd_rec['VDI'] = vdi_ref
- vbd_rec['userdevice'] = str(userdevice)
- vbd_rec['bootable'] = bootable
- vbd_rec['mode'] = 'RW'
- vbd_rec['type'] = 'disk'
- vbd_rec['unpluggable'] = True
- vbd_rec['empty'] = False
- vbd_rec['other_config'] = {}
- vbd_rec['qos_algorithm_type'] = ''
- vbd_rec['qos_algorithm_params'] = {}
- vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
- ' VDI %(vdi_ref)s ... ') % locals())
- vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
- ' VDI %(vdi_ref)s.') % locals())
- return vbd_ref
-
- @classmethod
def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference specific to CDRom devices."""
@@ -269,7 +242,8 @@ class VMHelper(HelperBase):
return vbd_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
- raise StorageError(_('VBD not found in instance %s') % vm_ref)
+ raise volume_utils.StorageError(
+ _('VBD not found in instance %s') % vm_ref)
@classmethod
def unplug_vbd(cls, session, vbd_ref):
@@ -279,7 +253,8 @@ class VMHelper(HelperBase):
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
- raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
+ raise volume_utils.StorageError(
+ _('Unable to unplug VBD %s') % vbd_ref)
@classmethod
def destroy_vbd(cls, session, vbd_ref):
@@ -289,7 +264,8 @@ class VMHelper(HelperBase):
session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
- raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
+ raise volume_utils.StorageError(
+ _('Unable to destroy VBD %s') % vbd_ref)
@classmethod
def destroy_vdi(cls, session, vdi_ref):
@@ -298,7 +274,8 @@ class VMHelper(HelperBase):
session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
- raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
+ raise volume_utils.StorageError(
+ _('Unable to destroy VDI %s') % vdi_ref)
@classmethod
def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
@@ -507,6 +484,12 @@ w
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create swap partition
+
+ # NOTE(jk0): We use a FAT32 filesystem for the Windows swap
+ # partition because that is what parted supports.
+ is_windows = instance.os_type == "windows"
+ fs_type = "fat32" if is_windows else "linux-swap"
+
dev_path = utils.make_dev_path(dev)
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True)
@@ -514,16 +497,16 @@ w
partition_start = 0
partition_end = swap_mb
utils.execute('parted', '--script', dev_path, 'mkpartfs',
- 'primary', 'linux-swap',
+ 'primary', fs_type,
str(partition_start),
str(partition_end),
run_as_root=True)
# 4. Create VBD between instance VM and swap VDI
- cls.create_vbd(session, vm_ref, vdi_ref, userdevice,
- bootable=False)
+ volume_utils.VolumeHelper.create_vbd(
+ session, vm_ref, vdi_ref, userdevice, bootable=False)
except:
- with utils.original_exception_raised():
+ with utils.save_and_reraise_exception():
cls.destroy_vdi(session, vdi_ref)
@classmethod
@@ -734,7 +717,7 @@ w
raise e
@classmethod
- def determine_disk_image_type(cls, instance, context):
+ def determine_disk_image_type(cls, image_meta):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
@@ -753,12 +736,11 @@ w
ImageType.DISK_VHD: 'DISK_VHD',
ImageType.DISK_ISO: 'DISK_ISO'}
disk_format = pretty_format[image_type]
- image_ref = instance.image_ref
- instance_id = instance.id
+ image_ref = image_meta['id']
LOG.debug(_("Detected %(disk_format)s format for image "
- "%(image_ref)s, instance %(instance_id)s") % locals())
+ "%(image_ref)s") % locals())
- def determine_from_glance():
+ def determine_from_image_meta():
glance_disk_format2nova_type = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL,
@@ -766,23 +748,13 @@ w
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO}
- image_ref = instance.image_ref
- glance_client, image_id = glance.get_glance_client(context,
- image_ref)
- meta = glance_client.get_image_meta(image_id)
- disk_format = meta['disk_format']
+ disk_format = image_meta['disk_format']
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=disk_format)
- def determine_from_instance():
- if instance.kernel_id:
- return ImageType.DISK
- else:
- return ImageType.DISK_RAW
-
- image_type = determine_from_glance()
+ image_type = determine_from_image_meta()
log_disk_format(image_type)
return image_type
@@ -1238,7 +1210,8 @@ def _wait_for_device(dev):
return
time.sleep(1)
- raise StorageError(_('Timeout waiting for device %s to be created') % dev)
+ raise volume_utils.StorageError(
+ _('Timeout waiting for device %s to be created') % dev)
@contextlib.contextmanager
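The polling loop behind _wait_for_device is a simple poll-with-timeout; sketched standalone (timeout and error type are illustrative, the real helper raises volume_utils.StorageError):

import os
import time

def wait_for_device(dev_path, timeout=10):
    # Check once a second until the device node shows up.
    for _ in xrange(timeout):
        if os.path.exists(dev_path):
            return
        time.sleep(1)
    raise RuntimeError('Timeout waiting for device %s to be created'
                       % dev_path)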
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 93b4ae153..5533b8cfd 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -41,6 +41,7 @@ from nova import utils
from nova.compute import api as compute
from nova.compute import power_state
from nova.virt import driver
+from nova.virt.xenapi.volume_utils import VolumeHelper
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
@@ -84,13 +85,14 @@ class VMOps(object):
"""
Management class for VM-related tasks
"""
- def __init__(self, session):
+ def __init__(self, session, product_version):
self.XenAPI = session.get_imported_xenapi()
self.compute_api = compute.API()
self._session = session
self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI
self.vif_driver = utils.import_object(FLAGS.xenapi_vif_driver)
+ self._product_version = product_version
def list_instances(self):
"""List VM instances."""
@@ -138,13 +140,13 @@ class VMOps(object):
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
- network_info, resize_instance):
+ network_info, image_meta, resize_instance):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
vm_ref = self._create_vm(context, instance,
[dict(vdi_type='os', vdi_uuid=vdi_uuid)],
- network_info)
+ network_info, image_meta)
if resize_instance:
self.resize_instance(instance, vdi_uuid)
@@ -164,8 +166,8 @@ class VMOps(object):
LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- def _create_disks(self, context, instance):
- disk_image_type = VMHelper.determine_disk_image_type(instance, context)
+ def _create_disks(self, context, instance, image_meta):
+ disk_image_type = VMHelper.determine_disk_image_type(image_meta)
vdis = VMHelper.fetch_image(context, self._session,
instance, instance.image_ref,
instance.user_id, instance.project_id,
@@ -177,7 +179,7 @@ class VMOps(object):
return vdis
- def spawn(self, context, instance, network_info):
+ def spawn(self, context, instance, image_meta, network_info):
vdis = None
try:
# 1. Vanity Step
@@ -192,13 +194,14 @@ class VMOps(object):
total_steps=BUILD_TOTAL_STEPS)
# 2. Fetch the Image over the Network
- vdis = self._create_disks(context, instance)
+ vdis = self._create_disks(context, instance, image_meta)
self._update_instance_progress(context, instance,
step=2,
total_steps=BUILD_TOTAL_STEPS)
# 3. Create the VM records
- vm_ref = self._create_vm(context, instance, vdis, network_info)
+ vm_ref = self._create_vm(context, instance, vdis, network_info,
+ image_meta)
self._update_instance_progress(context, instance,
step=3,
total_steps=BUILD_TOTAL_STEPS)
@@ -221,7 +224,7 @@ class VMOps(object):
"""Spawn a rescue instance."""
self.spawn(context, instance, network_info)
- def _create_vm(self, context, instance, vdis, network_info):
+ def _create_vm(self, context, instance, vdis, network_info, image_meta):
"""Create VM instance."""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -232,7 +235,7 @@ class VMOps(object):
if not VMHelper.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance.uuid)
- disk_image_type = VMHelper.determine_disk_image_type(instance, context)
+ disk_image_type = VMHelper.determine_disk_image_type(image_meta)
kernel = None
ramdisk = None
try:
@@ -328,7 +331,7 @@ class VMOps(object):
first_vdi_ref = VMHelper.fetch_blank_disk(session=self._session,
instance_type_id=instance.instance_type_id)
- VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=False)
# device 1 reserved for rescue disk and we've used '0'
@@ -349,8 +352,9 @@ class VMOps(object):
" %(instance_id)s, skipping resize partition")
% locals())
- VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
- vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=True)
+ VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=first_vdi_ref,
+ userdevice=userdevice, bootable=True)
# set user device to next free value
# userdevice 1 is reserved for rescue and we've used '0'
@@ -372,7 +376,7 @@ class VMOps(object):
continue
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdi['vdi_uuid'])
- VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=vdi_ref, userdevice=userdevice,
bootable=False)
userdevice += 1
@@ -441,6 +445,14 @@ class VMOps(object):
LOG.debug(_("Resetting network"))
self.reset_network(instance, vm_ref)
+ def _set_vcpu_weight():
+ inst_type = db.instance_type_get(ctx, instance.instance_type_id)
+ vcpu_weight = inst_type["vcpu_weight"]
+ if str(vcpu_weight) != "None":
+ LOG.debug(_("Setting VCPU weight"))
+ self._session.call_xenapi("VM.add_to_VCPUs_params", vm_ref,
+ "weight", vcpu_weight)
+
# NOTE(armando): Do we really need to do this in virt?
# NOTE(tr3buchet): not sure but wherever we do it, we need to call
# reset_network afterwards
@@ -456,6 +468,7 @@ class VMOps(object):
_inject_files()
_set_admin_password()
_reset_network()
+ _set_vcpu_weight()
return True
except Exception, exc:
LOG.warn(exc)
@@ -618,9 +631,15 @@ class VMOps(object):
'instance_id': instance_id,
'sr_path': sr_path}
- task = self._session.async_call_plugin('migration', 'transfer_vhd',
- {'params': pickle.dumps(params)})
- self._session.wait_for_task(task, instance_id)
+ try:
+ _params = {'params': pickle.dumps(params)}
+ task = self._session.async_call_plugin('migration',
+ 'transfer_vhd',
+ _params)
+ self._session.wait_for_task(task, instance_id)
+ except self.XenAPI.Failure:
+ msg = _("Failed to transfer vhd to new host")
+ raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance.name + '-orig'
@@ -750,7 +769,11 @@ class VMOps(object):
" GB") % locals())
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
# for an instance with no local storage
- self._session.call_xenapi('VDI.resize_online', vdi_ref,
+ if self._product_version[0] > 5:
+ resize_func_name = 'VDI.resize'
+ else:
+ resize_func_name = 'VDI.resize_online'
+ self._session.call_xenapi(resize_func_name, vdi_ref,
str(new_disk_size))
LOG.debug(_("Resize instance %s complete") % (instance.name))
@@ -916,8 +939,8 @@ class VMOps(object):
vbd_ref = self._session.call_xenapi("VM.get_VBDs", vm_ref)[1]
vdi_ref = self._session.call_xenapi("VBD.get_record", vbd_ref)["VDI"]
- return VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, 1,
- False)
+ return VolumeHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref,
+ 1, False)
def _shutdown_rescue(self, rescue_vm_ref):
"""Shutdown a rescue instance."""
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index c2d8e511f..e7975feec 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -47,6 +47,108 @@ class VolumeHelper(HelperBase):
"""
@classmethod
+ def create_sr(cls, session, label, params):
+
+ LOG.debug(_("creating sr within volume_utils"))
+ type = params['sr_type']
+ del params['sr_type']
+ LOG.debug(_('type is = %s') % type)
+ if 'name_description' in params:
+ desc = params['name_description']
+ LOG.debug(_('name = %s') % desc)
+ del params['name_description']
+ else:
+ desc = ''
+ if 'id' in params:
+ del params['id']
+ LOG.debug(params)
+
+ try:
+ sr_ref = session.call_xenapi("SR.create",
+ session.get_xenapi_host(),
+ params,
+ '0', label, desc, type, '', False, {})
+ LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
+ return sr_ref
+
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to create Storage Repository'))
+
+ @classmethod
+ def introduce_sr(cls, session, sr_uuid, label, params):
+ LOG.debug(_("introducing sr within volume_utils"))
+ type = params['sr_type']
+ del params['sr_type']
+ LOG.debug(_('type is = %s') % type)
+ if 'name_description' in params:
+ desc = params['name_description']
+ LOG.debug(_('name = %s') % desc)
+ del params['name_description']
+ else:
+ desc = ''
+ if 'id' in params:
+ del params['id']
+ LOG.debug(params)
+
+ try:
+ sr_ref = session.call_xenapi("SR.introduce",
+ sr_uuid,
+ label,
+ desc,
+ type,
+ '',
+ False,
+ params,)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
+
+            # Create PBD
+ LOG.debug(_('Creating pbd for SR'))
+ pbd_ref = cls.create_pbd(session, sr_ref, params)
+ LOG.debug(_('Plugging SR'))
+            # Plug PBD
+ session.call_xenapi("PBD.plug", pbd_ref)
+ session.call_xenapi("SR.scan", sr_ref)
+ return sr_ref
+
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce Storage Repository'))
+
+ @classmethod
+ def forget_sr(cls, session, sr_uuid):
+ """
+ Forgets the storage repository without destroying the VDIs within
+ """
+ try:
+ sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to get SR using uuid'))
+
+ LOG.debug(_('Forgetting SR %s...') % sr_ref)
+
+ try:
+ cls.unplug_pbds(session, sr_ref)
+ session.call_xenapi("SR.forget", sr_ref)
+
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to forget Storage Repository'))
+
+ @classmethod
+ def find_sr_by_uuid(cls, session, sr_uuid):
+ """
+ Return the SR reference for the given uuid, or None if not found.
+ """
+ sr_refs = session.call_xenapi("SR.get_all")
+ for sr_ref in sr_refs:
+ sr_rec = session.call_xenapi("SR.get_record", sr_ref)
+ if sr_rec['uuid'] == sr_uuid:
+ return sr_ref
+ return None
+
+ @classmethod
def create_iscsi_storage(cls, session, info, label, description):
"""
Create an iSCSI storage repository that will be used to mount
@@ -67,10 +169,6 @@ class VolumeHelper(HelperBase):
'port': info['targetPort'],
'targetIQN': info['targetIQN']}
try:
- sr_ref = session.call_xenapi("SR.create",
- session.get_xenapi_host(),
- record,
- '0', label, description, 'iscsi', '', False, {})
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
@@ -91,9 +189,40 @@ class VolumeHelper(HelperBase):
return sr_ref
@classmethod
- def destroy_iscsi_storage(cls, session, sr_ref):
- """Forget the SR whilst preserving the state of the disk"""
- LOG.debug(_("Forgetting SR %s ... "), sr_ref)
+ def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
+ """Create a VBD record. Returns a Deferred that gives the new
+ VBD reference."""
+ vbd_rec = {}
+ vbd_rec['VM'] = vm_ref
+ vbd_rec['VDI'] = vdi_ref
+ vbd_rec['userdevice'] = str(userdevice)
+ vbd_rec['bootable'] = bootable
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
+ vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.') % locals())
+ return vbd_ref
+
+ @classmethod
+ def create_pbd(cls, session, sr_ref, params):
+ pbd_rec = {}
+ pbd_rec['host'] = session.get_xenapi_host()
+ pbd_rec['SR'] = sr_ref
+ pbd_rec['device_config'] = params
+ pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
+ return pbd_ref
+
+ @classmethod
+ def unplug_pbds(cls, session, sr_ref):
pbds = []
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
@@ -106,45 +235,72 @@ class VolumeHelper(HelperBase):
except cls.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s') % locals())
- try:
- session.call_xenapi("SR.forget", sr_ref)
- LOG.debug(_("Forgetting SR %s done."), sr_ref)
- except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %(exc)s when forgetting'
- ' SR %(sr_ref)s') % locals())
@classmethod
- def introduce_vdi(cls, session, sr_ref):
+ def introduce_vdi(cls, session, sr_ref, vdi_uuid=None):
"""Introduce VDI in the host"""
try:
- vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
+ session.call_xenapi("SR.scan", sr_ref)
+ if vdi_uuid:
+ LOG.debug("vdi_uuid: %s" % vdi_uuid)
+ vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
+ else:
+ vdi_ref = (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
+
try:
- vdi_rec = session.call_xenapi("VDI.get_record", vdi_refs[0])
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ LOG.debug(vdi_rec)
+ LOG.debug(type(vdi_rec))
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to get record'
- ' of VDI %s on') % vdi_refs[0])
- else:
+ ' of VDI %s') % vdi_ref)
+
+ if vdi_rec['managed']:
+ # We do not need to introduce the vdi
+ return vdi_ref
+
+ try:
+ return session.call_xenapi("VDI.introduce",
+ vdi_rec['uuid'],
+ vdi_rec['name_label'],
+ vdi_rec['name_description'],
+ vdi_rec['SR'],
+ vdi_rec['type'],
+ vdi_rec['sharable'],
+ vdi_rec['read_only'],
+ vdi_rec['other_config'],
+ vdi_rec['location'],
+ vdi_rec['xenstore_data'],
+ vdi_rec['sm_config'])
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce VDI for SR %s')
+ % sr_ref)
+
+ @classmethod
+ def purge_sr(cls, session, sr_ref):
+ try:
+ sr_rec = session.call_xenapi("SR.get_record", sr_ref)
+ vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
+ except cls.XenAPI.Failure, ex:
+ LOG.exception(ex)
+ raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
+
+ for vdi_ref in vdi_refs:
try:
- return session.call_xenapi("VDI.introduce",
- vdi_rec['uuid'],
- vdi_rec['name_label'],
- vdi_rec['name_description'],
- vdi_rec['SR'],
- vdi_rec['type'],
- vdi_rec['sharable'],
- vdi_rec['read_only'],
- vdi_rec['other_config'],
- vdi_rec['location'],
- vdi_rec['xenstore_data'],
- vdi_rec['sm_config'])
- except cls.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to introduce VDI for SR %s')
- % sr_ref)
+ vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
+ except cls.XenAPI.Failure, ex:
+ LOG.exception(ex)
+ raise StorageError(_('Unable to find vbd for vdi %s')
+ % vdi_ref)
+ if vbd_refs:
+ return
+
+ cls.forget_sr(session, sr_rec['uuid'])
@classmethod
def parse_volume_info(cls, connection_info, mountpoint):
@@ -176,11 +332,15 @@ class VolumeHelper(HelperBase):
raise StorageError(_('Unable to obtain target information'
' %(data)s, %(mountpoint)s') % locals())
volume_info = {}
- volume_info['deviceNumber'] = device_number
- volume_info['volumeId'] = volume_id
- volume_info['targetHost'] = target_host
- volume_info['targetPort'] = target_port
+ volume_info['id'] = volume_id
+ volume_info['target'] = target_host
+ volume_info['port'] = target_port
volume_info['targetIQN'] = target_iqn
+ if 'auth_method' in connection_info and \
+ connection_info['auth_method'] == 'CHAP':
+ volume_info['chapuser'] = connection_info['auth_username']
+ volume_info['chappassword'] = connection_info['auth_password']
+
return volume_info
@classmethod
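
The CHAP branch above only decorates the volume_info mapping when the backend requested CHAP authentication. A minimal sketch of the resulting structure, using made-up credentials and target details (not real driver output):

    def build_volume_info(connection_info, volume_id, target_host,
                          target_port, target_iqn):
        # Mirrors parse_volume_info above
        volume_info = {'id': volume_id,
                       'target': target_host,
                       'port': target_port,
                       'targetIQN': target_iqn}
        # CHAP credentials ride along only when the backend asked for CHAP
        if connection_info.get('auth_method') == 'CHAP':
            volume_info['chapuser'] = connection_info['auth_username']
            volume_info['chappassword'] = connection_info['auth_password']
        return volume_info

    print build_volume_info({'auth_method': 'CHAP',
                             'auth_username': 'user',
                             'auth_password': 'secret'},
                            1, '192.0.2.10', 3260,
                            'iqn.2011-01.com.example:vol-1')
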
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 661373c4a..f9ba17fe6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -40,6 +40,78 @@ class VolumeOps(object):
VolumeHelper.XenAPI = self.XenAPI
VMHelper.XenAPI = self.XenAPI
+ def create_volume_for_sm(self, volume, sr_uuid):
+ LOG.debug("Creating volume for Storage Manager")
+
+ sm_vol_rec = {}
+ try:
+ sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to get SR using uuid'))
+ #Create VDI
+ # hex() appends an 'L' suffix for longs; strip it if present
+ label = 'vol-' + hex(volume['id']).rstrip('L')
+ # size presented to xenapi is in bytes, while euca api is in GB
+ vdi_size = volume['size'] * 1024 * 1024 * 1024
+ vdi_ref = VMHelper.create_vdi(self._session, sr_ref, label, vdi_size,
+ False)
+ vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
+ sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
+ return sm_vol_rec
+
+ def delete_volume_for_sm(self, vdi_uuid):
+ vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
+ if vdi_ref is None:
+ raise exception.Error(_('Could not find VDI ref'))
+
+ try:
+ self._session.call_xenapi("VDI.destroy", vdi_ref)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Error destroying VDI'))
+
+ def create_sr(self, label, params):
+ LOG.debug(_("Creating SR %s") % label)
+ sr_ref = VolumeHelper.create_sr(self._session, label, params)
+ if sr_ref is None:
+ raise exception.Error(_('Could not create SR'))
+ sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
+ if sr_rec is None:
+ raise exception.Error(_('Could not retrieve SR record'))
+ return sr_rec['uuid']
+
+ # Introduces the SR, unless it has already been introduced to this host
+ def introduce_sr(self, sr_uuid, label, params):
+ LOG.debug(_("Introducing SR %s") % label)
+ sr_ref = VolumeHelper.find_sr_by_uuid(self._session, sr_uuid)
+ if sr_ref:
+ LOG.debug(_('SR found in xapi database. No need to introduce'))
+ return sr_ref
+ sr_ref = VolumeHelper.introduce_sr(self._session, sr_uuid, label,
+ params)
+ if sr_ref is None:
+ raise exception.Error(_('Could not introduce SR'))
+ return sr_ref
+
+ def is_sr_on_host(self, sr_uuid):
+ LOG.debug(_('Checking for SR %s') % sr_uuid)
+ sr_ref = VolumeHelper.find_sr_by_uuid(self._session, sr_uuid)
+ if sr_ref:
+ return True
+ return False
+
+ # Forgets the SR, but only if it has been introduced to this host
+ def forget_sr(self, sr_uuid):
+ sr_ref = VolumeHelper.find_sr_by_uuid(self._session, sr_uuid)
+ if sr_ref is None:
+ LOG.info(_('SR %s not found in the xapi database') % sr_uuid)
+ return
+ try:
+ VolumeHelper.forget_sr(self._session, sr_uuid)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise exception.Error(_('Could not forget SR'))
+
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance"""
# Before we start, check that the VM exists
@@ -50,48 +122,79 @@ class VolumeOps(object):
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
driver_type = connection_info['driver_volume_type']
- if driver_type != 'iscsi':
+ if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
- # Create the iSCSI SR, and the PDB through which hosts access SRs.
- # But first, retrieve target info, like Host, IQN, LUN and SCSIID
- vol_rec = VolumeHelper.parse_volume_info(connection_info, mountpoint)
- label = 'SR-%s' % vol_rec['volumeId']
- description = 'Disk-for:%s' % instance_name
- # Create SR
- sr_ref = VolumeHelper.create_iscsi_storage(self._session,
- vol_rec,
- label,
- description)
+
+ data = connection_info['data']
+ if 'name_label' not in data:
+ label = 'tempSR-%s' % data['volume_id']
+ else:
+ label = data['name_label']
+ del data['name_label']
+
+ if 'name_description' not in data:
+ desc = 'Disk-for:%s' % instance_name
+ else:
+ desc = data['name_description']
+
+ LOG.debug(connection_info)
+ sr_params = {}
+ if u'sr_uuid' not in data:
+ sr_params = VolumeHelper.parse_volume_info(connection_info,
+ mountpoint)
+ uuid = "FA15E-D15C-" + str(sr_params['id'])
+ sr_params['sr_type'] = 'iscsi'
+ else:
+ uuid = data['sr_uuid']
+ for k in data['introduce_sr_keys']:
+ sr_params[k] = data[k]
+
+ sr_params['name_description'] = desc
+
+ # Introduce SR
+ try:
+ sr_ref = self.introduce_sr(uuid, label, sr_params)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to introduce Storage Repository'))
+
+ if 'vdi_uuid' in data:
+ vdi_uuid = data['vdi_uuid']
+ else:
+ vdi_uuid = None
+
# Introduce VDI and attach VBD to VM
try:
- vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
+ vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref,
+ vdi_uuid)
except StorageError, exc:
LOG.exception(exc)
- VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ self.forget_sr(uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
- else:
- try:
- vbd_ref = VMHelper.create_vbd(self._session,
- vm_ref, vdi_ref,
- vol_rec['deviceNumber'],
- False)
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to use SR %(sr_ref)s for'
- ' instance %(instance_name)s') % locals())
- else:
- try:
- task = self._session.call_xenapi('Async.VBD.plug',
- vbd_ref)
- self._session.wait_for_task(task, vol_rec['deviceNumber'])
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- VolumeHelper.destroy_iscsi_storage(self._session,
- sr_ref)
- raise Exception(_('Unable to attach volume to instance %s')
- % instance_name)
+
+ dev_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ try:
+ vbd_ref = VolumeHelper.create_vbd(self._session,
+ vm_ref,
+ vdi_ref,
+ dev_number,
+ False)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ self.forget_sr(uuid)
+ raise Exception(_('Unable to use SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
+
+ try:
+ self._session.call_xenapi("VBD.plug", vbd_ref)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ self.forget_sr(uuid)
+ raise Exception(_('Unable to attach volume to instance %s')
+ % instance_name)
+
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
@@ -111,19 +214,26 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
- else:
- try:
- sr_ref = VolumeHelper.find_sr_from_vbd(self._session,
- vbd_ref)
- VMHelper.unplug_vbd(self._session, vbd_ref)
- except StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to detach volume %s') % mountpoint)
- try:
- VMHelper.destroy_vbd(self._session, vbd_ref)
- except StorageError, exc:
- LOG.exception(exc)
- # Forget SR
- VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+
+ try:
+ sr_ref = VolumeHelper.find_sr_from_vbd(self._session,
+ vbd_ref)
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise Exception(_('Unable to detach volume %s') % mountpoint)
+ try:
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise Exception(_('Unable to destroy vbd %s') % mountpoint)
+
+ # Forget SR only if no other volumes on this host are using it
+ try:
+ VolumeHelper.purge_sr(self._session, sr_ref)
+ except StorageError, exc:
+ LOG.exception(exc)
+ raise Exception(_('Error purging SR %s') % sr_ref)
+
LOG.info(_('Mountpoint %(mountpoint)s detached from'
' instance %(instance_name)s') % locals())
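
attach_volume now has to serve both legacy iSCSI volumes and storage-manager (xensm) volumes, and the difference is entirely in how the SR uuid and parameters are assembled. A condensed sketch of that branch; pick_sr_params and the sample data dict are assumptions for illustration (the real logic lives inline above):

    def pick_sr_params(data, iscsi_params):
        """Return (sr_uuid, sr_params) the way attach_volume above does.

        iscsi_params stands in for VolumeHelper.parse_volume_info() output.
        """
        if u'sr_uuid' not in data:
            # Legacy iSCSI volume: synthesize a recognizable SR uuid
            sr_params = dict(iscsi_params)
            uuid = "FA15E-D15C-" + str(sr_params['id'])
            sr_params['sr_type'] = 'iscsi'
        else:
            # Storage-manager volume: reuse the backend SR and copy only
            # the keys the driver listed in 'introduce_sr_keys'
            uuid = data['sr_uuid']
            sr_params = dict((k, data[k]) for k in data['introduce_sr_keys'])
        return uuid, sr_params

    data = {u'sr_uuid': u'a8089a85-7fcf-4287-9dfd-0b1d6b2bb0f2',
            'introduce_sr_keys': ['server', 'sr_type'],
            'server': '192.0.2.5',
            'sr_type': 'nfs'}
    print pick_sr_params(data, {})
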
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index eb153a647..73d1b0cb7 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -169,12 +169,13 @@ class XenAPIConnection(driver.ComputeDriver):
def __init__(self, url, user, pw):
super(XenAPIConnection, self).__init__()
self._session = XenAPISession(url, user, pw)
- self._vmops = VMOps(self._session)
self._volumeops = VolumeOps(self._session)
self._host_state = None
+ self._product_version = self._session.get_product_version()
+ self._vmops = VMOps(self._session, self._product_version)
@property
- def HostState(self):
+ def host_state(self):
if not self._host_state:
self._host_state = HostState(self._session)
return self._host_state
@@ -193,10 +194,10 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, context, instance,
+ def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
"""Create VM instance"""
- self._vmops.spawn(context, instance, network_info)
+ self._vmops.spawn(context, instance, image_meta, network_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
@@ -208,10 +209,10 @@ class XenAPIConnection(driver.ComputeDriver):
self._vmops.finish_revert_migration(instance)
def finish_migration(self, context, migration, instance, disk_info,
- network_info, resize_instance=False):
+ network_info, image_meta, resize_instance=False):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, migration, instance, disk_info,
- network_info, resize_instance)
+ network_info, image_meta, resize_instance)
def snapshot(self, context, instance, image_id):
""" Create snapshot from a running VM instance """
@@ -416,12 +417,12 @@ class XenAPIConnection(driver.ComputeDriver):
def update_host_status(self):
"""Update the status info of the host, and return those values
to the calling program."""
- return self.HostState.update_status()
+ return self.host_state.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first."""
- return self.HostState.get_host_stats(refresh=refresh)
+ return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
@@ -455,6 +456,14 @@ class XenAPISession(object):
session.login_with_password(user, pw)
self._sessions.put(session)
+ def get_product_version(self):
+ """Return a tuple of (major, minor, rev) for the host version"""
+ host = self.get_xenapi_host()
+ software_version = self.call_xenapi('host.get_software_version',
+ host)
+ product_version = software_version['product_version']
+ return tuple(int(part) for part in product_version.split('.'))
+
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
return __import__('XenAPI')
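
get_product_version returns a plain tuple of ints, so the VDI.resize gate in vmops.py is a simple tuple-index comparison. A quick sketch, with '5.6.100' as an assumed version string:

    def product_version(version_string):
        # Same parsing as get_product_version above
        return tuple(int(part) for part in version_string.split('.'))

    version = product_version('5.6.100')     # -> (5, 6, 100)
    if version[0] > 5:
        resize_func_name = 'VDI.resize'          # XenServer 6.x and later
    else:
        resize_func_name = 'VDI.resize_online'   # XenServer 5.x
    print resize_func_name                       # -> VDI.resize_online
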
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 34103a1f3..b4d5ac48d 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -54,7 +54,7 @@ class API(base.Base):
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
" %(size)sG volume") % locals())
- raise quota.QuotaError(_("Volume quota exceeded. You cannot "
+ raise exception.QuotaError(_("Volume quota exceeded. You cannot "
"create a volume of size %sG") % size)
if availability_zone is None:
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index ea7386d86..893691669 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -213,6 +213,10 @@ class VolumeDriver(object):
True, run the update first."""
return None
+ def do_setup(self, context):
+ """Any initialization the volume driver does while starting"""
+ pass
+
class ISCSIDriver(VolumeDriver):
"""Executes commands relating to ISCSI volumes.
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 613924e7f..4fb1fde26 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -80,8 +80,11 @@ class VolumeManager(manager.SchedulerDependentManager):
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service."""
- self.driver.check_for_setup_error()
+
ctxt = context.get_admin_context()
+ self.driver.do_setup(ctxt)
+ self.driver.check_for_setup_error()
+
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
@@ -123,11 +126,10 @@ class VolumeManager(manager.SchedulerDependentManager):
if model_update:
self.db.volume_update(context, volume_ref['id'], model_update)
except Exception:
- exc_info = sys.exc_info()
- self.db.volume_update(context,
- volume_ref['id'], {'status': 'error'})
- self._notify_vsa(context, volume_ref, 'error')
- raise exc_info
+ with utils.save_and_reraise_exception():
+ self.db.volume_update(context,
+ volume_ref['id'], {'status': 'error'})
+ self._notify_vsa(context, volume_ref, 'error')
now = utils.utcnow()
self.db.volume_update(context,
@@ -179,11 +181,10 @@ class VolumeManager(manager.SchedulerDependentManager):
{'status': 'available'})
return True
except Exception:
- exc_info = sys.exc_info()
- self.db.volume_update(context,
- volume_ref['id'],
- {'status': 'error_deleting'})
- raise exc_info
+ with utils.save_and_reraise_exception():
+ self.db.volume_update(context,
+ volume_ref['id'],
+ {'status': 'error_deleting'})
self.db.volume_destroy(context, volume_id)
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
@@ -204,9 +205,10 @@ class VolumeManager(manager.SchedulerDependentManager):
model_update)
except Exception:
- self.db.snapshot_update(context,
- snapshot_ref['id'], {'status': 'error'})
- raise
+ with utils.save_and_reraise_exception():
+ self.db.snapshot_update(context,
+ snapshot_ref['id'],
+ {'status': 'error'})
self.db.snapshot_update(context,
snapshot_ref['id'], {'status': 'available',
@@ -223,10 +225,10 @@ class VolumeManager(manager.SchedulerDependentManager):
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
self.driver.delete_snapshot(snapshot_ref)
except Exception:
- self.db.snapshot_update(context,
- snapshot_ref['id'],
- {'status': 'error_deleting'})
- raise
+ with utils.save_and_reraise_exception():
+ self.db.snapshot_update(context,
+ snapshot_ref['id'],
+ {'status': 'error_deleting'})
self.db.snapshot_destroy(context, snapshot_id)
LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
diff --git a/nova/volume/xensm.py b/nova/volume/xensm.py
new file mode 100644
index 000000000..dab14b689
--- /dev/null
+++ b/nova/volume/xensm.py
@@ -0,0 +1,244 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+import time
+import os
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.volume.driver import VolumeDriver
+from nova.virt.xenapi_conn import XenAPISession
+from nova.virt.xenapi.volumeops import VolumeOps
+
+LOG = logging.getLogger("nova.volume.xensm")
+FLAGS = flags.FLAGS
+
+
+class XenSMDriver(VolumeDriver):
+
+ def _convert_config_params(self, conf_str):
+ params = dict([item.split("=") for item in conf_str.split()])
+ return params
+
+ def _get_introduce_sr_keys(self, params):
+ if 'name_label' in params:
+ del params['name_label']
+ keys = params.keys()
+ keys.append('sr_type')
+ return keys
+
+ def _create_storage_repo(self, context, backend_ref):
+ """Either creates or introduces SR on host
+ depending on whether it exists in xapi db."""
+ params = self._convert_config_params(backend_ref['config_params'])
+ if 'name_label' in params:
+ label = params['name_label']
+ del params['name_label']
+ else:
+ label = 'SR-' + str(backend_ref['id'])
+
+ params['sr_type'] = backend_ref['sr_type']
+
+ if backend_ref['sr_uuid'] is None:
+ # run the sr create command
+ try:
+ LOG.debug(_('SR name = %s') % label)
+ LOG.debug(_('Params: %s') % str(params))
+ sr_uuid = self._volumeops.create_sr(label, params)
+ # update sr_uuid and created in db
+ except Exception as ex:
+ LOG.exception(ex)
+ LOG.debug(_("Failed to create sr %s") % str(backend_ref['id']))
+ raise exception.Error(_('Create failed'))
+
+ LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid)
+ try:
+ self.db.sm_backend_conf_update(context,
+ backend_ref['id'],
+ dict(sr_uuid=sr_uuid))
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to update db"))
+
+ else:
+ # sr introduce, if not already done
+ try:
+ self._volumeops.introduce_sr(backend_ref['sr_uuid'], label,
+ params)
+ except Exception as ex:
+ LOG.exception(ex)
+ LOG.debug(_("Failed to introduce sr %s...continuing") \
+ % str(backend_ref['id']))
+
+ def _create_storage_repos(self, context):
+ """Create/Introduce storage repositories at start."""
+ backends = self.db.sm_backend_conf_get_all(context)
+ for backend in backends:
+ try:
+ self._create_storage_repo(context, backend)
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_('Failed to reach backend %d') \
+ % backend['id'])
+
+ def __init__(self, *args, **kwargs):
+ """Connect to the hypervisor."""
+
+ # This driver leverages Xen storage manager, and hence requires
+ # hypervisor to be Xen
+ if FLAGS.connection_type != 'xenapi':
+ raise exception.Error(_('XenSMDriver requires xenapi connection'))
+
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ try:
+ session = XenAPISession(url, username, password)
+ self._volumeops = VolumeOps(session)
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to initiate session"))
+
+ super(XenSMDriver, self).__init__(execute=utils.execute,
+ sync_exec=utils.execute,
+ *args, **kwargs)
+
+ def do_setup(self, ctxt):
+ """Setup includes creating or introducing storage repos
+ existing in the database and destroying deleted ones."""
+
+ # TODO: purge storage repos deleted from the database
+ self.ctxt = ctxt
+ self._create_storage_repos(ctxt)
+
+ def create_volume(self, volume):
+ """Creates a logical volume. Can optionally return a Dictionary of
+ changes to the volume object to be persisted."""
+
+ # For now the scheduling logic will be to try to fit the volume in
+ # the first available backend.
+ # TODO better scheduling once APIs are in place
+ sm_vol_rec = None
+ backends = self.db.sm_backend_conf_get_all(self.ctxt)
+ for backend in backends:
+ # Ensure that storage repo exists, if not create.
+ # This needs to be done because if nova compute and
+ # volume are both running on this host, then, as a
+ # part of detach_volume, compute could potentially forget SR
+ self._create_storage_repo(self.ctxt, backend)
+ sm_vol_rec = self._volumeops.create_volume_for_sm(
+ volume, backend['sr_uuid'])
+ if sm_vol_rec:
+ LOG.debug(_('Volume will be created in backend - %d') \
+ % backend['id'])
+ break
+
+ if sm_vol_rec:
+ # Update db
+ sm_vol_rec['id'] = volume['id']
+ sm_vol_rec['backend_id'] = backend['id']
+ try:
+ self.db.sm_volume_create(self.ctxt, sm_vol_rec)
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to update volume in db"))
+
+ else:
+ raise exception.Error(_('Unable to create volume'))
+
+ def delete_volume(self, volume):
+
+ vol_rec = self.db.sm_volume_get(self.ctxt, volume['id'])
+
+ try:
+ # If compute runs on this node, detach could have disconnected SR
+ backend_ref = self.db.sm_backend_conf_get(self.ctxt,
+ vol_rec['backend_id'])
+ self._create_storage_repo(self.ctxt, backend_ref)
+ self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid'])
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to delete vdi"))
+
+ try:
+ self.db.sm_volume_delete(self.ctxt, volume['id'])
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to delete volume in db"))
+
+ def local_path(self, volume):
+ return str(volume['id'])
+
+ def undiscover_volume(self, volume):
+ """Undiscover volume on a remote host."""
+ pass
+
+ def discover_volume(self, context, volume):
+ return str(volume['id'])
+
+ def check_for_setup_error(self):
+ pass
+
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ # !!! TODO
+ pass
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ pass
+
+ def ensure_export(self, context, volume):
+ """Safely, synchronously recreates an export for a logical volume."""
+ pass
+
+ def initialize_connection(self, volume, address):
+ try:
+ xensm_properties = dict(self.db.sm_volume_get(self.ctxt,
+ volume['id']))
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to find volume in db"))
+
+ # Keep the volume id key consistent with what ISCSI driver calls it
+ xensm_properties['volume_id'] = xensm_properties['id']
+ del xensm_properties['id']
+
+ try:
+ backend_conf = self.db.\
+ sm_backend_conf_get(self.ctxt,
+ xensm_properties['backend_id'])
+ except Exception as ex:
+ LOG.exception(ex)
+ raise exception.Error(_("Failed to find backend in db"))
+
+ params = self._convert_config_params(backend_conf['config_params'])
+
+ xensm_properties['flavor_id'] = backend_conf['flavor_id']
+ xensm_properties['sr_uuid'] = backend_conf['sr_uuid']
+ xensm_properties['sr_type'] = backend_conf['sr_type']
+ xensm_properties.update(params)
+ xensm_properties['introduce_sr_keys'] = \
+ self._get_introduce_sr_keys(params)
+ return {
+ 'driver_volume_type': 'xensm',
+ 'data': xensm_properties
+ }
+
+ def terminate_connection(self, volume, address):
+ pass
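
_convert_config_params treats the backend's config_params column as a flat 'key=value key=value' string. A usage sketch with an assumed NFS-style config string:

    def convert_config_params(conf_str):
        # Same one-liner as _convert_config_params above
        return dict([item.split("=") for item in conf_str.split()])

    params = convert_config_params(
        "server=192.0.2.5 serverpath=/vols name_label=MySR")
    print params
    # {'serverpath': '/vols', 'name_label': 'MySR', 'server': '192.0.2.5'}
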
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 47052905d..7f3575199 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -26,6 +26,7 @@ try:
import json
except ImportError:
import simplejson as json
+import md5
import os
import os.path
import pickle
@@ -34,6 +35,7 @@ import shutil
import subprocess
import tempfile
import time
+import urllib2
import XenAPIPlugin
@@ -45,6 +47,10 @@ CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
+class RetryException(Exception):
+ pass
+
+
def _copy_kernel_vdi(dest, copy_args):
vdi_uuid = copy_args['vdi_uuid']
vdi_size = copy_args['vdi_size']
@@ -67,39 +73,89 @@ def _copy_kernel_vdi(dest, copy_args):
return filename
-def _download_tarball(sr_path, staging_path, image_id, glance_host,
- glance_port, auth_token, num_retries):
+def _download_tarball(request, staging_path):
+ """Make one attempt to download and extract the image tarball"""
+ try:
+ response = urllib2.urlopen(request)
+ except urllib2.HTTPError, error:
+ raise RetryException(error)
+ except urllib2.URLError, error:
+ raise RetryException(error)
+
+ tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
+ tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
+
+ checksum = md5.new()
+ etag = response.info().getheader('etag', None)
+ if etag is None:
+ etag = response.info().getheader('x-image-meta-checksum', None)
+
+ url = request.get_full_url()
+ logging.info("Reading image data from %s" % url)
+
+ length_read = 0
+ while True:
+ chunk = response.read(CHUNK_SIZE)
+ if chunk == '':
+ break
+ length_read += len(chunk)
+ checksum.update(chunk)
+ tar_proc.stdin.write(chunk)
+
+ logging.info("Read %(length_read)s bytes from %(url)s" % locals())
+
+ try:
+ _finish_subprocess(tar_proc, tar_cmd)
+ except Exception, error:
+ raise RetryException(error)
+
+ checksum = checksum.hexdigest()
+ if etag is None:
+ msg = "No ETag found for comparison to checksum %(checksum)s"
+ logging.info(msg % locals())
+ elif checksum != etag:
+ msg = 'ETag %(etag)s does not match computed md5sum %(checksum)s'
+ raise RetryException(msg % locals())
+ else:
+ msg = "Verified image checksum %(checksum)s"
+ logging.info(msg % locals())
+
+ return
+
+
+def _download_tarball_with_retry(sr_path, image_id, glance_host,
+ glance_port, auth_token, num_retries):
"""Download the tarball image from Glance and extract it into the staging
- area.
+ area. Retry if there is any failure.
"""
# Build request headers
headers = {}
if auth_token:
headers['x-auth-token'] = auth_token
- conn = httplib.HTTPConnection(glance_host, glance_port)
+ url = "http://%(glance_host)s:%(glance_port)d/v1/images/"\
+ "%(image_id)s" % locals()
+ logging.info("Downloading %s" % url)
- for count in xrange(1 + num_retries):
- conn.request('GET', '/v1/images/%s' % image_id, headers=headers)
- resp = conn.getresponse()
- if resp.status == httplib.OK:
- break
- elif resp.status == httplib.NOT_FOUND:
- raise Exception("Image '%s' not found in Glance" % image_id)
- elif count == num_retries:
- raise Exception("Unexpected response from Glance %i" % resp.status)
- time.sleep(1)
+ request = urllib2.Request(url, headers=headers)
+ sleep_time = 0.5
- tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
- tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
+ for try_num in xrange(1, num_retries + 2):
+ try:
+ staging_path = _make_staging_area(sr_path)
+ _download_tarball(request, staging_path)
+ return staging_path
+ except RetryException, error:
+ msg = "Downloading %(url)s attempt %(try_num)d error: %(error)s"
+ logging.error(msg % locals())
- chunk = resp.read(CHUNK_SIZE)
- while chunk:
- tar_proc.stdin.write(chunk)
- chunk = resp.read(CHUNK_SIZE)
+ _cleanup_staging_area(staging_path)
+ time.sleep(sleep_time)
+ sleep_time = min(2 * sleep_time, 15)
- _finish_subprocess(tar_proc, tar_cmd)
- conn.close()
+ msg = "Unable to retrieve %(url)s after %(try_num)d attempt(s)." % locals()
+ logging.error(msg)
+ raise Exception(msg)
def _import_vhds(sr_path, staging_path, uuid_stack):
@@ -373,7 +429,8 @@ def _cleanup_staging_area(staging_path):
it's safe to remove the staging-area because the SR will keep the link
count > 0 (so the VHDs in the SR will not be deleted).
"""
- shutil.rmtree(staging_path)
+ if os.path.exists(staging_path):
+ shutil.rmtree(staging_path)
def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
@@ -412,15 +469,17 @@ def download_vhd(session, args):
auth_token = params["auth_token"]
num_retries = params["num_retries"]
- staging_path = _make_staging_area(sr_path)
+ staging_path = None
try:
- _download_tarball(sr_path, staging_path, image_id, glance_host,
- glance_port, auth_token, num_retries)
+ staging_path = _download_tarball_with_retry(sr_path, image_id,
+ glance_host, glance_port,
+ auth_token, num_retries)
# Right now, it's easier to return a single string via XenAPI,
# so we'll json encode the list of VHDs.
return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack))
finally:
- _cleanup_staging_area(staging_path)
+ if staging_path is not None:
+ _cleanup_staging_area(staging_path)
def upload_vhd(session, args):
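
The retry wrapper above makes num_retries + 1 attempts, doubling the sleep between attempts and capping it at 15 seconds. The schedule in isolation, as a sketch:

    def backoff_schedule(num_retries, start=0.5, cap=15):
        # Mirrors the sleep pattern in _download_tarball_with_retry
        # above: one attempt plus num_retries retries
        sleep_time = start
        delays = []
        for _attempt in xrange(num_retries + 1):
            delays.append(sleep_time)
            sleep_time = min(2 * sleep_time, cap)
        return delays

    print backoff_schedule(4)  # [0.5, 1.0, 2.0, 4.0, 8.0]
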
diff --git a/run_tests.sh b/run_tests.sh
index 9a69195be..837f9695a 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -8,6 +8,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
@@ -29,6 +30,7 @@ function process_option {
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
+ -s|--no-site-packages) no_site_packages=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
@@ -45,6 +47,8 @@ with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
+no_site_packages=0
+installvenvopts=
noseargs=
noseopts=
wrapper=""
@@ -62,6 +66,10 @@ if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=nova"
fi
+if [ $no_site_packages -eq 1 ]; then
+ installvenvopts="--no-site-packages"
+fi
+
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS 2> run_tests.log
@@ -123,14 +131,14 @@ then
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
- python tools/install_venv.py
+ python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
- python tools/install_venv.py
+ python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
diff --git a/tools/clean-vlans b/tools/clean-vlans
index a26ad86ad..284e5dc57 100755
--- a/tools/clean-vlans
+++ b/tools/clean-vlans
@@ -22,4 +22,4 @@ export LC_ALL=C
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo
sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
-sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo
+sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ip link del foo
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 2ecd446e6..166801e82 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -23,6 +23,7 @@
Installation script for Nova's development virtualenv
"""
+import optparse
import os
import subprocess
import sys
@@ -89,8 +90,18 @@ class Distro(object):
def install_m2crypto(self):
pip_install('M2Crypto')
+ def post_process(self):
+ """Any distribution-specific post-processing gets done here.
+
+ In particular, this is useful for applying patches to code inside
+ the venv."""
+ pass
+
class Fedora(Distro):
+ """This covers all Fedora-based distributions.
+
+ Includes: Fedora, RHEL, CentOS, Scientific Linux"""
def check_pkg(self, pkg):
return run_command_with_code(['rpm', '-q', pkg],
@@ -116,9 +127,30 @@ class Fedora(Distro):
if not self.check_pkg('m2crypto'):
self.yum_install('m2crypto')
+ def post_process(self):
+ """Workaround for a bug in eventlet.
+
+ This currently affects RHEL6.1, but the fix can safely be
+ applied to all RHEL and Fedora distributions.
+
+ This can be removed when the fix is applied upstream
+
+ Nova: https://bugs.launchpad.net/nova/+bug/884915
+ Upstream: https://bitbucket.org/which_linden/eventlet/issue/89"""
+
+ # Install "patch" program if it's not there
+ if not self.check_pkg('patch'):
+ self.yum_install('patch')
+
+ # Apply the eventlet patch
+ run_command(['patch',
+ '.nova-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py',
+ 'contrib/redhat-eventlet.patch'])
+
def get_distro():
- if os.path.exists('/etc/fedora-release'):
+ if os.path.exists('/etc/fedora-release') or \
+ os.path.exists('/etc/redhat-release'):
return Fedora()
else:
return Distro()
@@ -128,12 +160,15 @@ def check_dependencies():
get_distro().install_virtualenv()
-def create_virtualenv(venv=VENV):
+def create_virtualenv(venv=VENV, no_site_packages=True):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
- run_command(['virtualenv', '-q', VENV])
+ if no_site_packages:
+ run_command(['virtualenv', '-q', '--no-site-packages', VENV])
+ else:
+ run_command(['virtualenv', '-q', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
@@ -169,6 +204,10 @@ def install_dependencies(venv=VENV):
f.write("%s\n" % ROOT)
+def post_process():
+ get_distro().post_process()
+
+
def print_help():
help = """
Nova development environment setup is complete.
@@ -191,11 +230,22 @@ def print_help():
print help
+def parse_args():
+ """Parse command-line arguments"""
+ parser = optparse.OptionParser()
+ parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
+ default=False, action="store_true",
+ help="Do not inherit packages from global Python install")
+ return parser.parse_args()
+
+
def main(argv):
+ (options, args) = parse_args()
check_python_version()
check_dependencies()
- create_virtualenv()
+ create_virtualenv(no_site_packages=options.no_site_packages)
install_dependencies()
+ post_process()
print_help()
if __name__ == '__main__':