-rw-r--r--  .bzrignore  1
-rw-r--r--  .mailmap  3
-rw-r--r--  Authors  7
-rw-r--r--  MANIFEST.in  1
-rwxr-xr-x  bin/nova-ajax-console-proxy  18
-rwxr-xr-x  bin/nova-api  17
-rwxr-xr-x  bin/nova-dhcpbridge  11
-rwxr-xr-x  bin/nova-direct-api  4
-rwxr-xr-x  bin/nova-manage  83
-rwxr-xr-x  contrib/nova.sh  12
-rw-r--r--  doc/.autogenerated  283
-rw-r--r--  doc/build/html/.buildinfo  4
-rw-r--r--  doc/source/adminguide/distros/others.rst  88
-rw-r--r--  doc/source/adminguide/distros/ubuntu.10.04.rst  40
-rw-r--r--  doc/source/adminguide/distros/ubuntu.10.10.rst  41
-rw-r--r--  doc/source/adminguide/flags.rst  23
-rw-r--r--  doc/source/adminguide/multi.node.install.rst  392
-rw-r--r--  doc/source/adminguide/single.node.install.rst  362
-rw-r--r--  doc/source/api/autoindex.rst  144
-rw-r--r--  doc/source/api/nova..adminclient.rst  6
-rw-r--r--  doc/source/api/nova..api.direct.rst  6
-rw-r--r--  doc/source/api/nova..api.ec2.admin.rst  6
-rw-r--r--  doc/source/api/nova..api.ec2.apirequest.rst  6
-rw-r--r--  doc/source/api/nova..api.ec2.cloud.rst  6
-rw-r--r--  doc/source/api/nova..api.ec2.metadatarequesthandler.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.auth.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.backup_schedules.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.common.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.consoles.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.faults.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.flavors.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.images.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.servers.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.shared_ip_groups.rst  6
-rw-r--r--  doc/source/api/nova..api.openstack.zones.rst  6
-rw-r--r--  doc/source/api/nova..auth.dbdriver.rst  6
-rw-r--r--  doc/source/api/nova..auth.fakeldap.rst  6
-rw-r--r--  doc/source/api/nova..auth.ldapdriver.rst  6
-rw-r--r--  doc/source/api/nova..auth.manager.rst  6
-rw-r--r--  doc/source/api/nova..auth.signer.rst  6
-rw-r--r--  doc/source/api/nova..cloudpipe.pipelib.rst  6
-rw-r--r--  doc/source/api/nova..compute.api.rst  6
-rw-r--r--  doc/source/api/nova..compute.instance_types.rst  6
-rw-r--r--  doc/source/api/nova..compute.manager.rst  6
-rw-r--r--  doc/source/api/nova..compute.monitor.rst  6
-rw-r--r--  doc/source/api/nova..compute.power_state.rst  6
-rw-r--r--  doc/source/api/nova..console.api.rst  6
-rw-r--r--  doc/source/api/nova..console.fake.rst  6
-rw-r--r--  doc/source/api/nova..console.manager.rst  6
-rw-r--r--  doc/source/api/nova..console.xvp.rst  6
-rw-r--r--  doc/source/api/nova..context.rst  6
-rw-r--r--  doc/source/api/nova..crypto.rst  6
-rw-r--r--  doc/source/api/nova..db.api.rst  6
-rw-r--r--  doc/source/api/nova..db.base.rst  6
-rw-r--r--  doc/source/api/nova..db.migration.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.api.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.migration.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.models.rst  6
-rw-r--r--  doc/source/api/nova..db.sqlalchemy.session.rst  6
-rw-r--r--  doc/source/api/nova..exception.rst  6
-rw-r--r--  doc/source/api/nova..fakememcache.rst  6
-rw-r--r--  doc/source/api/nova..fakerabbit.rst  6
-rw-r--r--  doc/source/api/nova..flags.rst  6
-rw-r--r--  doc/source/api/nova..image.glance.rst  6
-rw-r--r--  doc/source/api/nova..image.local.rst  6
-rw-r--r--  doc/source/api/nova..image.s3.rst  6
-rw-r--r--  doc/source/api/nova..image.service.rst  6
-rw-r--r--  doc/source/api/nova..log.rst  6
-rw-r--r--  doc/source/api/nova..manager.rst  6
-rw-r--r--  doc/source/api/nova..network.api.rst  6
-rw-r--r--  doc/source/api/nova..network.linux_net.rst  6
-rw-r--r--  doc/source/api/nova..network.manager.rst  6
-rw-r--r--  doc/source/api/nova..objectstore.bucket.rst  6
-rw-r--r--  doc/source/api/nova..objectstore.handler.rst  6
-rw-r--r--  doc/source/api/nova..objectstore.image.rst  6
-rw-r--r--  doc/source/api/nova..objectstore.stored.rst  6
-rw-r--r--  doc/source/api/nova..quota.rst  6
-rw-r--r--  doc/source/api/nova..rpc.rst  6
-rw-r--r--  doc/source/api/nova..scheduler.chance.rst  6
-rw-r--r--  doc/source/api/nova..scheduler.driver.rst  6
-rw-r--r--  doc/source/api/nova..scheduler.manager.rst  6
-rw-r--r--  doc/source/api/nova..scheduler.simple.rst  6
-rw-r--r--  doc/source/api/nova..scheduler.zone.rst  6
-rw-r--r--  doc/source/api/nova..service.rst  6
-rw-r--r--  doc/source/api/nova..test.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.fakes.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_adminapi.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_api.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_auth.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_common.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_faults.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_flavors.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_images.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_servers.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.openstack.test_zones.rst  6
-rw-r--r--  doc/source/api/nova..tests.api.test_wsgi.rst  6
-rw-r--r--  doc/source/api/nova..tests.db.fakes.rst  6
-rw-r--r--  doc/source/api/nova..tests.declare_flags.rst  6
-rw-r--r--  doc/source/api/nova..tests.fake_flags.rst  6
-rw-r--r--  doc/source/api/nova..tests.glance.stubs.rst  6
-rw-r--r--  doc/source/api/nova..tests.hyperv_unittest.rst  6
-rw-r--r--  doc/source/api/nova..tests.objectstore_unittest.rst  6
-rw-r--r--  doc/source/api/nova..tests.real_flags.rst  6
-rw-r--r--  doc/source/api/nova..tests.runtime_flags.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_access.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_api.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_auth.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_cloud.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_compute.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_console.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_direct.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_flags.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_instance_types.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_localization.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_log.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_middleware.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_misc.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_network.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_quota.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_rpc.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_scheduler.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_service.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_test.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_twistd.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_utils.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_virt.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_volume.rst  6
-rw-r--r--  doc/source/api/nova..tests.test_xenapi.rst  6
-rw-r--r--  doc/source/api/nova..tests.xenapi.stubs.rst  6
-rw-r--r--  doc/source/api/nova..twistd.rst  6
-rw-r--r--  doc/source/api/nova..utils.rst  6
-rw-r--r--  doc/source/api/nova..version.rst  6
-rw-r--r--  doc/source/api/nova..virt.connection.rst  6
-rw-r--r--  doc/source/api/nova..virt.disk.rst  6
-rw-r--r--  doc/source/api/nova..virt.fake.rst  6
-rw-r--r--  doc/source/api/nova..virt.hyperv.rst  6
-rw-r--r--  doc/source/api/nova..virt.images.rst  6
-rw-r--r--  doc/source/api/nova..virt.libvirt_conn.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.fake.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.network_utils.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vm_utils.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.vmops.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volume_utils.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi.volumeops.rst  6
-rw-r--r--  doc/source/api/nova..virt.xenapi_conn.rst  6
-rw-r--r--  doc/source/api/nova..volume.api.rst  6
-rw-r--r--  doc/source/api/nova..volume.driver.rst  6
-rw-r--r--  doc/source/api/nova..volume.manager.rst  6
-rw-r--r--  doc/source/api/nova..volume.san.rst  6
-rw-r--r--  doc/source/api/nova..wsgi.rst  6
-rw-r--r--  doc/source/community.rst  12
-rw-r--r--  doc/source/index.rst  20
-rw-r--r--  doc/source/man/novamanage.rst  36
-rw-r--r--  doc/source/nova.concepts.rst  5
-rw-r--r--  doc/source/object.model.rst  14
-rw-r--r--  doc/source/quickstart.rst  2
-rw-r--r--  doc/source/runnova/binaries.rst (renamed from doc/source/adminguide/binaries.rst)  4
-rw-r--r--  doc/source/runnova/euca2ools.rst (renamed from doc/source/adminguide/euca2ools.rst)  0
-rw-r--r--  doc/source/runnova/flags.rst  193
-rw-r--r--  doc/source/runnova/getting.started.rst (renamed from doc/source/adminguide/getting.started.rst)  15
-rw-r--r--  doc/source/runnova/index.rst (renamed from doc/source/adminguide/index.rst)  15
-rw-r--r--  doc/source/runnova/managing.images.rst (renamed from doc/source/adminguide/managing.images.rst)  0
-rw-r--r--  doc/source/runnova/managing.instance.types.rst  84
-rw-r--r--  doc/source/runnova/managing.instances.rst (renamed from doc/source/adminguide/managing.instances.rst)  0
-rw-r--r--  doc/source/runnova/managing.networks.rst (renamed from doc/source/adminguide/managing.networks.rst)  0
-rw-r--r--  doc/source/runnova/managing.projects.rst (renamed from doc/source/adminguide/managing.projects.rst)  0
-rw-r--r--  doc/source/runnova/managing.users.rst (renamed from doc/source/adminguide/managing.users.rst)  0
-rw-r--r--  doc/source/runnova/managingsecurity.rst (renamed from doc/source/adminguide/managingsecurity.rst)  0
-rw-r--r--  doc/source/runnova/monitoring.rst (renamed from doc/source/adminguide/monitoring.rst)  0
-rw-r--r--  doc/source/runnova/network.flat.rst (renamed from doc/source/adminguide/network.flat.rst)  0
-rw-r--r--  doc/source/runnova/network.vlan.rst (renamed from doc/source/adminguide/network.vlan.rst)  0
-rw-r--r--  doc/source/runnova/nova.manage.rst (renamed from doc/source/adminguide/nova.manage.rst)  0
-rw-r--r--  nova/__init__.py  2
-rw-r--r--  nova/adminclient.py  63
-rw-r--r--  nova/api/ec2/admin.py  122
-rw-r--r--  nova/api/ec2/apirequest.py  18
-rw-r--r--  nova/api/ec2/cloud.py  31
-rw-r--r--  nova/api/openstack/__init__.py  4
-rw-r--r--  nova/api/openstack/auth.py  14
-rw-r--r--  nova/api/openstack/common.py  8
-rw-r--r--  nova/api/openstack/flavors.py  20
-rw-r--r--  nova/api/openstack/servers.py  190
-rw-r--r--  nova/apiservice.py  99
-rw-r--r--  nova/compute/api.py  90
-rw-r--r--  nova/compute/instance_types.py  123
-rw-r--r--  nova/compute/manager.py  152
-rw-r--r--  nova/db/api.py  85
-rw-r--r--  nova/db/sqlalchemy/api.py  185
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py  78
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py  72
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py  90
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py  87
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py  61
-rw-r--r--  nova/db/sqlalchemy/migration.py  2
-rw-r--r--  nova/db/sqlalchemy/models.py  59
-rw-r--r--  nova/flags.py  52
-rw-r--r--  nova/log.py  7
-rw-r--r--  nova/network/manager.py  171
-rw-r--r--  nova/quota.py  14
-rw-r--r--  nova/rpc.py  15
-rw-r--r--  nova/service.py  111
-rw-r--r--  nova/test.py  32
-rw-r--r--  nova/tests/__init__.py  25
-rw-r--r--  nova/tests/api/openstack/__init__.py  4
-rw-r--r--  nova/tests/api/openstack/common.py  35
-rw-r--r--  nova/tests/api/openstack/fakes.py  25
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py  11
-rw-r--r--  nova/tests/api/openstack/test_api.py  4
-rw-r--r--  nova/tests/api/openstack/test_auth.py  52
-rw-r--r--  nova/tests/api/openstack/test_common.py  26
-rw-r--r--  nova/tests/api/openstack/test_faults.py  4
-rw-r--r--  nova/tests/api/openstack/test_flavors.py  18
-rw-r--r--  nova/tests/api/openstack/test_images.py  14
-rw-r--r--  nova/tests/api/openstack/test_ratelimiting.py  15
-rw-r--r--  nova/tests/api/openstack/test_servers.py  163
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py  7
-rw-r--r--  nova/tests/api/openstack/test_zones.py  10
-rw-r--r--  nova/tests/api/test_wsgi.py  6
-rw-r--r--  nova/tests/db/fakes.py  21
-rw-r--r--  nova/tests/fake_flags.py  7
-rw-r--r--  nova/tests/glance/stubs.py  40
-rw-r--r--  nova/tests/objectstore_unittest.py  1
-rw-r--r--  nova/tests/test_cloud.py  106
-rw-r--r--  nova/tests/test_compute.py  32
-rw-r--r--  nova/tests/test_direct.py  1
-rw-r--r--  nova/tests/test_instance_types.py  86
-rw-r--r--  nova/tests/test_network.py  11
-rw-r--r--  nova/tests/test_quota.py  42
-rw-r--r--  nova/tests/test_scheduler.py  2
-rw-r--r--  nova/tests/test_service.py  7
-rw-r--r--  nova/tests/test_utils.py  174
-rw-r--r--  nova/tests/test_virt.py  6
-rw-r--r--  nova/tests/test_xenapi.py  112
-rw-r--r--  nova/tests/xenapi/stubs.py  56
-rw-r--r--  nova/utils.py  51
-rw-r--r--  nova/virt/disk.py  4
-rw-r--r--  nova/virt/fake.py  35
-rw-r--r--  nova/virt/libvirt_conn.py  10
-rw-r--r--  nova/virt/xenapi/fake.py  5
-rw-r--r--  nova/virt/xenapi/vm_utils.py  307
-rw-r--r--  nova/virt/xenapi/vmops.py  497
-rw-r--r--  nova/virt/xenapi/volumeops.py  2
-rw-r--r--  nova/virt/xenapi_conn.py  50
-rw-r--r--  nova/volume/driver.py  179
-rw-r--r--  nova/volume/manager.py  8
-rw-r--r--  nova/volume/san.py  312
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance  397
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/migration  117
-rw-r--r--  run_tests.py  233
-rwxr-xr-x  run_tests.sh  18
-rw-r--r--  smoketests/base.py  60
-rw-r--r--  smoketests/flags.py  4
-rw-r--r--  smoketests/netadmin_smoketests.py  194
-rwxr-xr-x  smoketests/proxy.sh  22
-rw-r--r--  smoketests/public_network_smoketests.py  11
-rw-r--r--  smoketests/sysadmin_smoketests.py (renamed from smoketests/user_smoketests.py)  241
266 files changed, 6587 insertions, 2130 deletions
diff --git a/.bzrignore b/.bzrignore
index b271561a3..d22b62629 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -13,3 +13,4 @@ CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py
+*.DS_Store
diff --git a/.mailmap b/.mailmap
index a839eba6c..ed4404ad5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -15,10 +15,12 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
+<itoumsn@nttdata.co.jp> <itoumsn@shayol>
<jaypipes@gmail.com> <jpipes@serialcoder>
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<josh@jk0.org> <josh.kearney@rackspace.com>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<justin@fathomdb.com> <superstack@superstack.org>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
@@ -40,4 +42,5 @@
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
+<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
diff --git a/Authors b/Authors
index 494e614a0..7993955e2 100644
--- a/Authors
+++ b/Authors
@@ -31,13 +31,15 @@ John Dewey <john@dewey.ws>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
-Josh Kearney <josh.kearney@rackspace.com>
+Josh Kearney <josh@jk0.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
+Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
+Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
@@ -45,7 +47,8 @@ Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
-Naveed Massjouni <naveed.massjouni@rackspace.com>
+Naveed Massjouni <naveedm9@gmail.com>
+Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
diff --git a/MANIFEST.in b/MANIFEST.in
index f0a9cffb3..2ceed34f3 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -38,3 +38,4 @@ include nova/tests/db/nova.austin.sqlite
include plugins/xenapi/README
include plugins/xenapi/etc/xapi.d/plugins/objectstore
include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+global-exclude *.pyc
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 392b328b1..bbd60bade 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -47,9 +47,11 @@ from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
-
flags.DEFINE_integer('ajax_console_idle_timeout', 300,
'Seconds before idle connection destroyed')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
LOG = logging.getLogger('nova.ajax_console_proxy')
LOG.setLevel(logging.DEBUG)
@@ -61,10 +63,16 @@ class AjaxConsoleProxy(object):
def __call__(self, env, start_response):
try:
- req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
- env['HTTP_HOST'],
- env['PATH_INFO'],
- env['QUERY_STRING'])
+ if 'QUERY_STRING' in env:
+ req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'],
+ env['QUERY_STRING'])
+ else:
+ req_url = '%s://%s%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'])
+
if 'HTTP_REFERER' in env:
auth_url = env['HTTP_REFERER']
else:
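The hunk above makes the proxy tolerate WSGI environments that do not carry a QUERY_STRING key. The following standalone Python sketch (purely illustrative, not nova's code) rebuilds a request URL from a WSGI environ and appends the query string only when one is present and non-empty:

    def reconstruct_url(env):
        # Scheme, host and path are always present in a WSGI environ.
        req_url = '%s://%s%s' % (env['wsgi.url_scheme'],
                                 env['HTTP_HOST'],
                                 env['PATH_INFO'])
        # QUERY_STRING may be absent entirely; env.get() also treats an
        # empty string as "no query string".
        if env.get('QUERY_STRING'):
            req_url = '%s?%s' % (req_url, env['QUERY_STRING'])
        return req_url

    print reconstruct_url({'wsgi.url_scheme': 'http',
                           'HTTP_HOST': 'localhost:8000',
                           'PATH_INFO': '/console',
                           'QUERY_STRING': 'token=abc123'})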
diff --git a/bin/nova-api b/bin/nova-api
index 933202dc8..2d2ef6d0c 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -34,20 +34,33 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
-from nova import apiservice
+from nova import service
from nova import flags
from nova import log as logging
+from nova import utils
+from nova import version
from nova import wsgi
+
+LOG = logging.getLogger('nova.api')
+
FLAGS = flags.FLAGS
+
if __name__ == '__main__':
+ utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
+ LOG.audit(_("Starting nova-api node (version %s)"),
+ version.version_string_with_vcs())
+ LOG.debug(_("Full set of FLAGS:"))
+ for flag in FLAGS:
+ flag_get = FLAGS.get(flag, None)
+ LOG.debug("%(flag)s : %(flag_get)s" % locals())
conf = wsgi.paste_config_file('nova-api.conf')
if not conf:
LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf')
sys.exit(1)
else:
- service = apiservice.serve(conf)
+ service = service.serve_wsgi(service.ApiService, conf)
service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 35b837ca9..3dd9de367 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -105,16 +105,7 @@ def main():
logging.setup()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
- FLAGS.fake_rabbit = True
- FLAGS.network_size = 16
- FLAGS.connection_type = 'fake'
- FLAGS.fake_network = True
- FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
- FLAGS.num_networks = 5
- path = os.path.abspath(os.path.join(os.path.dirname(__file__),
- '..',
- 'nova.sqlite'))
- FLAGS.sql_connection = 'sqlite:///%s' % path
+ from nova.tests import fake_flags
action = argv[1]
if action in ['add', 'del', 'old']:
mac = argv[2]
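The nova-dhcpbridge change replaces a block of inline test overrides with a single import of nova.tests.fake_flags, so every test-only setting lives in one module. Below is a minimal sketch of that idea, assuming the nova package is importable; it is not the actual contents of nova/tests/fake_flags.py. The flag names are the ones the removed inline block set, and flags.DECLARE mirrors how bin/nova-manage pulls in flags defined in other modules:

    # fake_flags.py -- overrides take effect as a side effect of import.
    from nova import flags

    FLAGS = flags.FLAGS

    # Flags defined elsewhere must be declared before they can be set here.
    flags.DECLARE('network_size', 'nova.network.manager')
    flags.DECLARE('num_networks', 'nova.network.manager')

    FLAGS.fake_rabbit = True                # in-process fake AMQP backend
    FLAGS.connection_type = 'fake'          # fake virt driver
    FLAGS.fake_network = True
    FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
    FLAGS.network_size = 16
    FLAGS.num_networks = 5
    FLAGS.sql_connection = 'sqlite:///nova.sqlite'

Any script that needs the test configuration then simply imports nova.tests.fake_flags, as the new nova-dhcpbridge code does.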
diff --git a/bin/nova-direct-api b/bin/nova-direct-api
index 6c63bd26b..bf29d9a5e 100755
--- a/bin/nova-direct-api
+++ b/bin/nova-direct-api
@@ -45,6 +45,10 @@ from nova.compute import api as compute_api
FLAGS = flags.FLAGS
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
+
if __name__ == '__main__':
utils.default_flagfile()
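Several binaries in this change gain explicit registrations of the python-gflags help flags so that --help, --helpshort and --helpxml work per binary. A minimal standalone sketch of that registration, assuming only the python-gflags package (the direct_port flag mirrors the one defined in the hunk above; everything else is illustrative):

    import sys

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_integer('direct_port', 8001, 'Direct API port')

    # Registering the built-in help flags is what makes --help usable.
    gflags.DEFINE_flag(gflags.HelpFlag())
    gflags.DEFINE_flag(gflags.HelpshortFlag())
    gflags.DEFINE_flag(gflags.HelpXMLFlag())

    if __name__ == '__main__':
        argv = FLAGS(sys.argv)  # --help prints usage and exits here
        print 'direct_port = %d' % FLAGS.direct_port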
diff --git a/bin/nova-manage b/bin/nova-manage
index 5189de0e1..9bf3a1bb3 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -84,6 +84,7 @@ from nova import utils
from nova.api.ec2.cloud import ec2_id_to_id
from nova.auth import manager
from nova.cloudpipe import pipelib
+from nova.compute import instance_types
from nova.db import migration
FLAGS = flags.FLAGS
@@ -93,6 +94,9 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
def param2id(object_id):
@@ -550,7 +554,7 @@ class ServiceCommands(object):
args: [host] [service]"""
ctxt = context.get_admin_context()
now = datetime.datetime.utcnow()
- services = db.service_get_all(ctxt)
+ services = db.service_get_all(ctxt) + db.service_get_all(ctxt, True)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -658,6 +662,79 @@ class VolumeCommands(object):
"mountpoint": volume['mountpoint']}})
+class InstanceTypeCommands(object):
+ """Class for managing instance types / flavors."""
+
+ def _print_instance_types(self, n, val):
+ deleted = ('', ', inactive')[val["deleted"] == 1]
+ print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, "
+ "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % (
+ n, val["memory_mb"], val["vcpus"], val["local_gb"],
+ val["flavorid"], val["swap"], val["rxtx_quota"],
+ val["rxtx_cap"], deleted)
+
+ def create(self, name, memory, vcpus, local_gb, flavorid,
+ swap=0, rxtx_quota=0, rxtx_cap=0):
+ """Creates instance types / flavors
+ arguments: name memory vcpus local_gb flavorid [swap] [rxtx_quota]
+ [rxtx_cap]
+ """
+ try:
+ instance_types.create(name, memory, vcpus, local_gb,
+ flavorid, swap, rxtx_quota, rxtx_cap)
+ except exception.InvalidInputException, e:
+ print "Must supply valid parameters to create instance type"
+ print e
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ print "Unknown error"
+ sys.exit(3)
+ else:
+ print "%s created" % name
+
+ def delete(self, name, purge=None):
+ """Marks instance types / flavors as deleted
+ arguments: name"""
+ try:
+ if purge == "--purge":
+ instance_types.purge(name)
+ verb = "purged"
+ else:
+ instance_types.destroy(name)
+ verb = "deleted"
+ except exception.ApiError:
+ print "Valid instance type name is required"
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ sys.exit(3)
+ else:
+ print "%s %s" % (name, verb)
+
+ def list(self, name=None):
+ """Lists all active or specific instance types / flavors
+ arguments: [name]"""
+ try:
+ if name == None:
+ inst_types = instance_types.get_all_types()
+ elif name == "--all":
+ inst_types = instance_types.get_all_types(1)
+ else:
+ inst_types = instance_types.get_instance_type(name)
+ except exception.DBError, e:
+ _db_error(e)
+ if isinstance(inst_types.values()[0], dict):
+ for k, v in inst_types.iteritems():
+ self._print_instance_types(k, v)
+ else:
+ self._print_instance_types(name, inst_types)
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -670,7 +747,9 @@ CATEGORIES = [
('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands),
- ('volume', VolumeCommands)]
+ ('volume', VolumeCommands),
+ ('instance_type', InstanceTypeCommands),
+ ('flavor', InstanceTypeCommands)]
def lazy_match(name, key_value_tuples):
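The new InstanceTypeCommands class maps failures to distinct process exit codes (1 for bad input, 2 for database errors, 3 for anything else). A condensed, hedged sketch of that error-handling pattern, detached from nova -- the exception names and exit codes mirror the hunk above, but the helper and the example command are hypothetical:

    import sys

    class InvalidInput(Exception):
        pass

    class DBError(Exception):
        pass

    def run_command(func, *args):
        """Run a command, translating failures into exit codes."""
        try:
            func(*args)
        except InvalidInput, e:
            print "Must supply valid parameters: %s" % e
            sys.exit(1)
        except DBError, e:
            print "DB Error: %s" % e
            sys.exit(2)
        except Exception, e:
            print "Unknown error: %s" % e
            sys.exit(3)

    def create_flavor(name, memory_mb, vcpus):
        if memory_mb <= 0 or vcpus <= 0:
            raise InvalidInput("memory and vcpus must be positive")
        print "%s created" % name

    run_command(create_flavor, 'm1.tiny', 512, 1)

Because the class is registered under both the 'instance_type' and 'flavor' categories, the same commands serve either spelling of the subcommand.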
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 9259035ca..1187f2728 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -66,7 +66,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
- sudo apt-get install -y socat
+ sudo apt-get install -y socat unzip
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
@@ -111,8 +111,7 @@ if [ "$CMD" == "run" ]; then
--nodaemon
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
--network_manager=nova.network.manager.$NET_MAN
---cc_host=$HOST_IP
---routing_source_ip=$HOST_IP
+--my_ip=$HOST_IP
--sql_connection=$SQL_CONN
--auth_driver=nova.auth.$AUTH
--libvirt_type=$LIBVIRT_TYPE
@@ -151,7 +150,6 @@ NOVA_CONF_EOF
mkdir -p $NOVA_DIR/instances
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
- $NOVA_DIR/tools/clean-vlans
if [ ! -d "$NOVA_DIR/images" ]; then
ln -s $DIR/images $NOVA_DIR/images
fi
@@ -169,10 +167,14 @@ NOVA_CONF_EOF
# create a project called 'admin' with project manager of 'admin'
$NOVA_DIR/bin/nova-manage project create admin admin
# export environment variables for project 'admin' and user 'admin'
- $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
+ $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
+ unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
# create a small network
$NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
+ # create some floating ips
+ $NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27
+
# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
screen_it api "$NOVA_DIR/bin/nova-api"
diff --git a/doc/.autogenerated b/doc/.autogenerated
new file mode 100644
index 000000000..456c8ad1e
--- /dev/null
+++ b/doc/.autogenerated
@@ -0,0 +1,283 @@
+source/api/nova..adminclient.rst
+source/api/nova..api.direct.rst
+source/api/nova..api.ec2.admin.rst
+source/api/nova..api.ec2.apirequest.rst
+source/api/nova..api.ec2.cloud.rst
+source/api/nova..api.ec2.metadatarequesthandler.rst
+source/api/nova..api.openstack.auth.rst
+source/api/nova..api.openstack.backup_schedules.rst
+source/api/nova..api.openstack.common.rst
+source/api/nova..api.openstack.consoles.rst
+source/api/nova..api.openstack.faults.rst
+source/api/nova..api.openstack.flavors.rst
+source/api/nova..api.openstack.images.rst
+source/api/nova..api.openstack.servers.rst
+source/api/nova..api.openstack.shared_ip_groups.rst
+source/api/nova..api.openstack.zones.rst
+source/api/nova..auth.dbdriver.rst
+source/api/nova..auth.fakeldap.rst
+source/api/nova..auth.ldapdriver.rst
+source/api/nova..auth.manager.rst
+source/api/nova..auth.signer.rst
+source/api/nova..cloudpipe.pipelib.rst
+source/api/nova..compute.api.rst
+source/api/nova..compute.instance_types.rst
+source/api/nova..compute.manager.rst
+source/api/nova..compute.monitor.rst
+source/api/nova..compute.power_state.rst
+source/api/nova..console.api.rst
+source/api/nova..console.fake.rst
+source/api/nova..console.manager.rst
+source/api/nova..console.xvp.rst
+source/api/nova..context.rst
+source/api/nova..crypto.rst
+source/api/nova..db.api.rst
+source/api/nova..db.base.rst
+source/api/nova..db.migration.rst
+source/api/nova..db.sqlalchemy.api.rst
+source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+source/api/nova..db.sqlalchemy.migration.rst
+source/api/nova..db.sqlalchemy.models.rst
+source/api/nova..db.sqlalchemy.session.rst
+source/api/nova..exception.rst
+source/api/nova..fakememcache.rst
+source/api/nova..fakerabbit.rst
+source/api/nova..flags.rst
+source/api/nova..image.glance.rst
+source/api/nova..image.local.rst
+source/api/nova..image.s3.rst
+source/api/nova..image.service.rst
+source/api/nova..log.rst
+source/api/nova..manager.rst
+source/api/nova..network.api.rst
+source/api/nova..network.linux_net.rst
+source/api/nova..network.manager.rst
+source/api/nova..objectstore.bucket.rst
+source/api/nova..objectstore.handler.rst
+source/api/nova..objectstore.image.rst
+source/api/nova..objectstore.stored.rst
+source/api/nova..quota.rst
+source/api/nova..rpc.rst
+source/api/nova..scheduler.chance.rst
+source/api/nova..scheduler.driver.rst
+source/api/nova..scheduler.manager.rst
+source/api/nova..scheduler.simple.rst
+source/api/nova..scheduler.zone.rst
+source/api/nova..service.rst
+source/api/nova..test.rst
+source/api/nova..tests.api.openstack.fakes.rst
+source/api/nova..tests.api.openstack.test_adminapi.rst
+source/api/nova..tests.api.openstack.test_api.rst
+source/api/nova..tests.api.openstack.test_auth.rst
+source/api/nova..tests.api.openstack.test_common.rst
+source/api/nova..tests.api.openstack.test_faults.rst
+source/api/nova..tests.api.openstack.test_flavors.rst
+source/api/nova..tests.api.openstack.test_images.rst
+source/api/nova..tests.api.openstack.test_ratelimiting.rst
+source/api/nova..tests.api.openstack.test_servers.rst
+source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
+source/api/nova..tests.api.openstack.test_zones.rst
+source/api/nova..tests.api.test_wsgi.rst
+source/api/nova..tests.db.fakes.rst
+source/api/nova..tests.declare_flags.rst
+source/api/nova..tests.fake_flags.rst
+source/api/nova..tests.glance.stubs.rst
+source/api/nova..tests.hyperv_unittest.rst
+source/api/nova..tests.objectstore_unittest.rst
+source/api/nova..tests.real_flags.rst
+source/api/nova..tests.runtime_flags.rst
+source/api/nova..tests.test_access.rst
+source/api/nova..tests.test_api.rst
+source/api/nova..tests.test_auth.rst
+source/api/nova..tests.test_cloud.rst
+source/api/nova..tests.test_compute.rst
+source/api/nova..tests.test_console.rst
+source/api/nova..tests.test_direct.rst
+source/api/nova..tests.test_flags.rst
+source/api/nova..tests.test_instance_types.rst
+source/api/nova..tests.test_localization.rst
+source/api/nova..tests.test_log.rst
+source/api/nova..tests.test_middleware.rst
+source/api/nova..tests.test_misc.rst
+source/api/nova..tests.test_network.rst
+source/api/nova..tests.test_quota.rst
+source/api/nova..tests.test_rpc.rst
+source/api/nova..tests.test_scheduler.rst
+source/api/nova..tests.test_service.rst
+source/api/nova..tests.test_test.rst
+source/api/nova..tests.test_twistd.rst
+source/api/nova..tests.test_utils.rst
+source/api/nova..tests.test_virt.rst
+source/api/nova..tests.test_volume.rst
+source/api/nova..tests.test_xenapi.rst
+source/api/nova..tests.xenapi.stubs.rst
+source/api/nova..twistd.rst
+source/api/nova..utils.rst
+source/api/nova..version.rst
+source/api/nova..virt.connection.rst
+source/api/nova..virt.disk.rst
+source/api/nova..virt.fake.rst
+source/api/nova..virt.hyperv.rst
+source/api/nova..virt.images.rst
+source/api/nova..virt.libvirt_conn.rst
+source/api/nova..virt.xenapi.fake.rst
+source/api/nova..virt.xenapi.network_utils.rst
+source/api/nova..virt.xenapi.vm_utils.rst
+source/api/nova..virt.xenapi.vmops.rst
+source/api/nova..virt.xenapi.volume_utils.rst
+source/api/nova..virt.xenapi.volumeops.rst
+source/api/nova..virt.xenapi_conn.rst
+source/api/nova..volume.api.rst
+source/api/nova..volume.driver.rst
+source/api/nova..volume.manager.rst
+source/api/nova..volume.san.rst
+source/api/nova..wsgi.rst
+source/api/autoindex.rst
+source/api/nova..adminclient.rst
+source/api/nova..api.direct.rst
+source/api/nova..api.ec2.admin.rst
+source/api/nova..api.ec2.apirequest.rst
+source/api/nova..api.ec2.cloud.rst
+source/api/nova..api.ec2.metadatarequesthandler.rst
+source/api/nova..api.openstack.auth.rst
+source/api/nova..api.openstack.backup_schedules.rst
+source/api/nova..api.openstack.common.rst
+source/api/nova..api.openstack.consoles.rst
+source/api/nova..api.openstack.faults.rst
+source/api/nova..api.openstack.flavors.rst
+source/api/nova..api.openstack.images.rst
+source/api/nova..api.openstack.servers.rst
+source/api/nova..api.openstack.shared_ip_groups.rst
+source/api/nova..api.openstack.zones.rst
+source/api/nova..auth.dbdriver.rst
+source/api/nova..auth.fakeldap.rst
+source/api/nova..auth.ldapdriver.rst
+source/api/nova..auth.manager.rst
+source/api/nova..auth.signer.rst
+source/api/nova..cloudpipe.pipelib.rst
+source/api/nova..compute.api.rst
+source/api/nova..compute.instance_types.rst
+source/api/nova..compute.manager.rst
+source/api/nova..compute.monitor.rst
+source/api/nova..compute.power_state.rst
+source/api/nova..console.api.rst
+source/api/nova..console.fake.rst
+source/api/nova..console.manager.rst
+source/api/nova..console.xvp.rst
+source/api/nova..context.rst
+source/api/nova..crypto.rst
+source/api/nova..db.api.rst
+source/api/nova..db.base.rst
+source/api/nova..db.migration.rst
+source/api/nova..db.sqlalchemy.api.rst
+source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+source/api/nova..db.sqlalchemy.migration.rst
+source/api/nova..db.sqlalchemy.models.rst
+source/api/nova..db.sqlalchemy.session.rst
+source/api/nova..exception.rst
+source/api/nova..fakememcache.rst
+source/api/nova..fakerabbit.rst
+source/api/nova..flags.rst
+source/api/nova..image.glance.rst
+source/api/nova..image.local.rst
+source/api/nova..image.s3.rst
+source/api/nova..image.service.rst
+source/api/nova..log.rst
+source/api/nova..manager.rst
+source/api/nova..network.api.rst
+source/api/nova..network.linux_net.rst
+source/api/nova..network.manager.rst
+source/api/nova..objectstore.bucket.rst
+source/api/nova..objectstore.handler.rst
+source/api/nova..objectstore.image.rst
+source/api/nova..objectstore.stored.rst
+source/api/nova..quota.rst
+source/api/nova..rpc.rst
+source/api/nova..scheduler.chance.rst
+source/api/nova..scheduler.driver.rst
+source/api/nova..scheduler.manager.rst
+source/api/nova..scheduler.simple.rst
+source/api/nova..scheduler.zone.rst
+source/api/nova..service.rst
+source/api/nova..test.rst
+source/api/nova..tests.api.openstack.fakes.rst
+source/api/nova..tests.api.openstack.test_adminapi.rst
+source/api/nova..tests.api.openstack.test_api.rst
+source/api/nova..tests.api.openstack.test_auth.rst
+source/api/nova..tests.api.openstack.test_common.rst
+source/api/nova..tests.api.openstack.test_faults.rst
+source/api/nova..tests.api.openstack.test_flavors.rst
+source/api/nova..tests.api.openstack.test_images.rst
+source/api/nova..tests.api.openstack.test_ratelimiting.rst
+source/api/nova..tests.api.openstack.test_servers.rst
+source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
+source/api/nova..tests.api.openstack.test_zones.rst
+source/api/nova..tests.api.test_wsgi.rst
+source/api/nova..tests.db.fakes.rst
+source/api/nova..tests.declare_flags.rst
+source/api/nova..tests.fake_flags.rst
+source/api/nova..tests.glance.stubs.rst
+source/api/nova..tests.hyperv_unittest.rst
+source/api/nova..tests.objectstore_unittest.rst
+source/api/nova..tests.real_flags.rst
+source/api/nova..tests.runtime_flags.rst
+source/api/nova..tests.test_access.rst
+source/api/nova..tests.test_api.rst
+source/api/nova..tests.test_auth.rst
+source/api/nova..tests.test_cloud.rst
+source/api/nova..tests.test_compute.rst
+source/api/nova..tests.test_console.rst
+source/api/nova..tests.test_direct.rst
+source/api/nova..tests.test_flags.rst
+source/api/nova..tests.test_instance_types.rst
+source/api/nova..tests.test_localization.rst
+source/api/nova..tests.test_log.rst
+source/api/nova..tests.test_middleware.rst
+source/api/nova..tests.test_misc.rst
+source/api/nova..tests.test_network.rst
+source/api/nova..tests.test_quota.rst
+source/api/nova..tests.test_rpc.rst
+source/api/nova..tests.test_scheduler.rst
+source/api/nova..tests.test_service.rst
+source/api/nova..tests.test_test.rst
+source/api/nova..tests.test_twistd.rst
+source/api/nova..tests.test_utils.rst
+source/api/nova..tests.test_virt.rst
+source/api/nova..tests.test_volume.rst
+source/api/nova..tests.test_xenapi.rst
+source/api/nova..tests.xenapi.stubs.rst
+source/api/nova..twistd.rst
+source/api/nova..utils.rst
+source/api/nova..version.rst
+source/api/nova..virt.connection.rst
+source/api/nova..virt.disk.rst
+source/api/nova..virt.fake.rst
+source/api/nova..virt.hyperv.rst
+source/api/nova..virt.images.rst
+source/api/nova..virt.libvirt_conn.rst
+source/api/nova..virt.xenapi.fake.rst
+source/api/nova..virt.xenapi.network_utils.rst
+source/api/nova..virt.xenapi.vm_utils.rst
+source/api/nova..virt.xenapi.vmops.rst
+source/api/nova..virt.xenapi.volume_utils.rst
+source/api/nova..virt.xenapi.volumeops.rst
+source/api/nova..virt.xenapi_conn.rst
+source/api/nova..volume.api.rst
+source/api/nova..volume.driver.rst
+source/api/nova..volume.manager.rst
+source/api/nova..volume.san.rst
+source/api/nova..wsgi.rst
diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo
new file mode 100644
index 000000000..091736d4f
--- /dev/null
+++ b/doc/build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 2a2fe6198f4be4a4d6f289b09d16d74a
+tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/doc/source/adminguide/distros/others.rst b/doc/source/adminguide/distros/others.rst
deleted file mode 100644
index ec14a9abb..000000000
--- a/doc/source/adminguide/distros/others.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-Installation on other distros (like Debian, Fedora or CentOS )
-==============================================================
-
-Feel free to add additional notes for additional distributions.
-
-Nova installation on CentOS 5.5
--------------------------------
-
-These are notes for installing OpenStack Compute on CentOS 5.5 and will be updated but are NOT final. Please test for accuracy and edit as you see fit.
-
-The principle botleneck for running nova on centos in python 2.6. Nova is written in python 2.6 and CentOS 5.5. comes with python 2.4. We can not update python system wide as some core utilities (like yum) is dependent on python 2.4. Also very few python 2.6 modules are available in centos/epel repos.
-
-Pre-reqs
---------
-
-Add euca2ools and EPEL repo first.::
-
- cat >/etc/yum.repos.d/euca2ools.repo << EUCA_REPO_CONF_EOF
- [eucalyptus]
- name=euca2ools
- baseurl=http://www.eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/yum/centos/
- enabled=1
- gpgcheck=0
-
- EUCA_REPO_CONF_EOF
-
-::
-
- rpm -Uvh 'http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm'
-
-Now install python2.6, kvm and few other libraries through yum::
-
- yum -y install dnsmasq vblade kpartx kvm gawk iptables ebtables bzr screen euca2ools curl rabbitmq-server gcc gcc-c++ autoconf automake swig openldap openldap-servers nginx python26 python26-devel python26-distribute git openssl-devel python26-tools mysql-server qemu kmod-kvm libxml2 libxslt libxslt-devel mysql-devel
-
-Then download the latest aoetools and then build(and install) it, check for the latest version on sourceforge, exact url will change if theres a new release::
-
- wget -c http://sourceforge.net/projects/aoetools/files/aoetools/32/aoetools-32.tar.gz/download
- tar -zxvf aoetools-32.tar.gz
- cd aoetools-32
- make
- make install
-
-Add the udev rules for aoetools::
-
- cat > /etc/udev/rules.d/60-aoe.rules << AOE_RULES_EOF
- SUBSYSTEM=="aoe", KERNEL=="discover", NAME="etherd/%k", GROUP="disk", MODE="0220"
- SUBSYSTEM=="aoe", KERNEL=="err", NAME="etherd/%k", GROUP="disk", MODE="0440"
- SUBSYSTEM=="aoe", KERNEL=="interfaces", NAME="etherd/%k", GROUP="disk", MODE="0220"
- SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="0220"
- # aoe block devices
- KERNEL=="etherd*", NAME="%k", GROUP="disk"
- AOE_RULES_EOF
-
-Load the kernel modules::
-
- modprobe aoe
-
-::
-
- modprobe kvm
-
-Now, install the python modules using easy_install-2.6, this ensures the installation are done against python 2.6
-
-
-easy_install-2.6 twisted sqlalchemy mox greenlet carrot daemon eventlet tornado IPy routes lxml MySQL-python
-python-gflags need to be downloaded and installed manually, use these commands (check the exact url for newer releases ):
-
-::
-
- wget -c "http://python-gflags.googlecode.com/files/python-gflags-1.4.tar.gz"
- tar -zxvf python-gflags-1.4.tar.gz
- cd python-gflags-1.4
- python2.6 setup.py install
- cd ..
-
-Same for python2.6-libxml2 module, notice the --with-python and --prefix flags. --with-python ensures we are building it against python2.6 (otherwise it will build against python2.4, which is default)::
-
- wget -c "ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz"
- tar -zxvf libxml2-2.7.3.tar.gz
- cd libxml2-2.7.3
- ./configure --with-python=/usr/bin/python26 --prefix=/usr
- make all
- make install
- cd python
- python2.6 setup.py install
- cd ..
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst
deleted file mode 100644
index bd0693c46..000000000
--- a/doc/source/adminguide/distros/ubuntu.10.04.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-Installing on Ubuntu 10.04 (Lucid)
-==================================
-
-Step 1: Install dependencies
-----------------------------
-Grab the latest code from launchpad:
-
-::
-
- bzr clone lp:nova
-
-Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (when using Debian, edit nova.sh to have USE_PPA=0):
-
-.. todo:: give a link to a stable releases page
-
-Step 2: Install dependencies
-----------------------------
-
-Nova requires rabbitmq for messaging, so install that first.
-
-*Note:* You must have sudo installed to run these commands as shown here.
-
-::
-
- sudo apt-get install rabbitmq-server
-
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
-
-If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gflags which is included in the OpenStack PPA.
-
-::
-
- sudo apt-get install python-software-properties
- sudo add-apt-repository ppa:nova-core/trunk
- sudo apt-get update
- sudo apt-get install python-twisted python-gflags
-
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.10.rst b/doc/source/adminguide/distros/ubuntu.10.10.rst
deleted file mode 100644
index a3fa2def1..000000000
--- a/doc/source/adminguide/distros/ubuntu.10.10.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-Installing on Ubuntu 10.10 (Maverick)
-=====================================
-Single Machine Installation (Ubuntu 10.10)
-
-While we wouldn't expect you to put OpenStack Compute into production on a non-LTS version of Ubuntu, these instructions are up-to-date with the latest version of Ubuntu.
-
-Make sure you are running Ubuntu 10.10 so that the packages will be available. This install requires more than 70 MB of free disk space.
-
-These instructions are based on Soren Hansen's blog entry, Openstack on Maverick. A script is in progress as well.
-
-Step 1: Install required prerequisites
---------------------------------------
-Nova requires rabbitmq for messaging and redis for storing state (for now), so we'll install these first.::
-
- sudo apt-get install rabbitmq-server redis-server
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
-
-Step 2: Install Nova packages available in Maverick Meerkat
------------------------------------------------------------
-Type or copy/paste in the following line to get the packages that you use to run OpenStack Compute.::
-
- sudo apt-get install python-nova
- sudo apt-get install nova-api nova-objectstore nova-compute nova-scheduler nova-network euca2ools unzip
-
-You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. This operation may take a while as many dependent packages will be installed. Note: there is a dependency problem with python-nova which can be worked around by installing first.
-
-When the installation is complete, you'll see the following lines confirming:::
-
- Adding system user `nova' (UID 106) ...
- Adding new user `nova' (UID 106) with group `nogroup' ...
- Not creating home directory `/var/lib/nova'.
- Setting up nova-scheduler (0.9.1~bzr331-0ubuntu2) ...
- * Starting nova scheduler nova-scheduler
- WARNING:root:Starting scheduler node
- ...done.
- Processing triggers for libc-bin ...
- ldconfig deferred processing now taking place
- Processing triggers for python-support ...
-
-Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/flags.rst b/doc/source/adminguide/flags.rst
deleted file mode 100644
index 072f0a1a5..000000000
--- a/doc/source/adminguide/flags.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Flags and Flagfiles
-===================
-
-* python-gflags
-* flagfiles
-* list of flags by component (see concepts list)
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
deleted file mode 100644
index c53455e3e..000000000
--- a/doc/source/adminguide/multi.node.install.rst
+++ /dev/null
@@ -1,392 +0,0 @@
-
-Installing Nova on Multiple Servers
-===================================
-
-When you move beyond evaluating the technology and into building an actual
-production environment, you will need to know how to configure your datacenter
-and how to deploy components across your clusters. This guide should help you
-through that process.
-
-You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
-
-This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
-
-For a starting architecture, these instructions describing installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node.
-
-Requirements for a multi-node installation
-------------------------------------------
-
-* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
-* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
-* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
-Assumptions
------------
-
-* Networking is configured between/through the physical machines on a single subnet.
-* Installation and execution are both performed by ROOT user.
-
-
-Scripted Installation
----------------------
-A script available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
-
-You must run these scripts with root permissions.
-
-From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
-
-::
-
- wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/nova-CC-install-v1.1.sh
-
-Ensure you can execute the script by modifying the permissions on the script file.
-
-::
-
- sudo chmod 755 nova-CC-install-v1.1.sh
-
-
-::
-
- sudo ./nova-CC-install-v1.1.sh
-
-Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. You can use the nova-NODE-installer.sh script from the above github-hosted project for the compute node installation.
-
-Copy the nova.conf from the cloud controller node to the compute node.
-
-Restart related services::
-
- libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
-
-You can go to the `Configuration section`_ for next steps.
-
-Manual Installation - Step-by-Step
-----------------------------------
-The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
-
-Cloud Controller Installation
-`````````````````````````````
-On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
-
-Step 1 - Use apt-get to get the latest code
--------------------------------------------
-
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a pre-requisite for setting up the nova package repo:
-
-::
-
- sudo apt-get install python-software-properties
- sudo add-apt-repository ppa:nova-core/trunk
-
-2. Run update.
-
-::
-
- sudo apt-get update
-
-3. Install python required packages, nova-packages, and helper apps.
-
-::
-
- sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip
-
-It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
-
-Step 2 Set up configuration file (installed in /etc/nova)
----------------------------------------------------------
-
-1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
-
-::
-
---daemonize=1
---dhcpbridge_flagfile=/etc/nova/nova.conf
---dhcpbridge=/usr/bin/nova-dhcpbridge
---logdir=/var/log/nova
---state_path=/var/lib/nova
-
-The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
-
---sql_connection ### Location of Nova SQL DB
-
---s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
-
---rabbit_host ### This is where the rabbit AMQP messaging service is hosted
-
---cc_host ### This is where the the nova-api service lives
-
---verbose ### Optional but very helpful during initial setup
-
---ec2_url ### The location to interface nova-api
-
---network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
-
-nova.network.manager.FlatManager # Simple, no-vlan networking type
-nova.network.manager. FlatDHCPManager # Flat networking with DHCP
-nova.network.manager.VlanManager # Vlan networking with DHCP – /DEFAULT/ if no network manager is defined in nova.conf
-
---fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
-
---network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
-
-The following code can be cut and paste, and edited to your setup:
-
-Note: CC_ADDR=<the external IP address of your cloud controller>
-
-Detailed explanation of the following example is available above.
-
-::
-
---sql_connection=mysql://root:nova@<CC_ADDR>/nova
---s3_host=<CC_ADDR>
---rabbit_host=<CC_ADDR>
---cc_host=<CC_ADDR>
---verbose
---ec2_url=http://<CC_ADDR>:8773/services/Cloud
---network_manager=nova.network.manager.VlanManager
---fixed_range=<network/prefix>
---network_size=<# of addrs>
-
-2. Create a “nova” group, and set permissions::
-
- addgroup nova
-
-The Nova config file should have its owner set to root:nova, and mode set to 0644, since they contain your MySQL server's root password. ::
-
- chown -R root:nova /etc/nova
- chmod 644 /etc/nova/nova.conf
-
-Step 3 - Setup the SQL DB (MySQL for this setup)
-------------------------------------------------
-
-1. First you 'preseed' to bypass all the installation prompts::
-
- bash
- MYSQL_PASS=nova
- cat <<MYSQL_PRESEED | debconf-set-selections
- mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
- mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
- mysql-server-5.1 mysql-server/start_on_boot boolean true
- MYSQL_PRESEED
-
-2. Install MySQL::
-
- apt-get install -y mysql-server
-
-3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any::
-
- sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
- service mysql restart
-
-4. MySQL DB configuration:
-
-Create NOVA database::
-
- mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
-
-Update the DB to include user 'root'@'%' with super user privileges::
-
- mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
-
-Set mySQL root password::
-
- mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
-
-Compute Node Installation
-`````````````````````````
-
-Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
-
-Network Configuration
----------------------
-
-If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
-
-Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
-
- < begin /etc/network/interfaces >
- # The loopback network interface
- auto lo
- iface lo inet loopback
-
- # Networking for NOVA
- auto br100
-
- iface br100 inet dhcp
- bridge_ports eth0
- bridge_stp off
- bridge_maxwait 0
- bridge_fd 0
- < end /etc/network/interfaces >
-
-Next, restart networking to apply the changes::
-
- sudo /etc/init.d/networking restart
-
-Configuration
-`````````````
-
-On the Compute node, you should continue with these configuration steps.
-
-Step 1 - Set up the Nova environment
-------------------------------------
-
-These are the commands you run to update the database if needed, and then set up a user and project::
-
- /usr/bin/python /usr/bin/nova-manage db sync
- /usr/bin/python /usr/bin/nova-manage user admin <user_name>
- /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
- /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
-
-Here is an example of what this looks like with real data::
-
- /usr/bin/python /usr/bin/nova-manage db sync
- /usr/bin/python /usr/bin/nova-manage user admin dub
- /usr/bin/python /usr/bin/nova-manage project create dubproject dub
- /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
-
-(I chose a /24 since that falls inside the /12 range I set in 'fixed_range' in nova.conf. Currently, there can only be one network, and I am using the maximum number of IPs available in a /24. You can choose any valid size that you would like.)
-
-Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case, you will need to manually edit the 'networks' table in the SQL database.
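-
-As a sketch of such a manual edit (assuming the columns are named 'cidr', 'gateway', and 'broadcast', and using the example range above)::
-
-  mysql -uroot -p$MYSQL_PASS nova -e "UPDATE networks SET gateway='192.168.0.254', broadcast='192.168.0.255' WHERE cidr='192.168.0.0/24';"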
-
-On running the "nova-manage network create" command, entries are made in the 'networks' and 'fixed_ips' tables. However, one of the networks listed in the 'networks' table needs to be marked as a bridge in order for the code to know that a bridge exists. The network is marked as bridged automatically based on the type of network manager selected; you only need to mark the network as a bridge manually if you chose FlatManager as your network type. More information on setting up the bridge device can be found at the end of this document.
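-
-If you did choose FlatManager and need to mark the bridge by hand, a sketch of that update (assuming the column is named 'bridge') could look like::
-
-  mysql -uroot -p$MYSQL_PASS nova -e "UPDATE networks SET bridge='br100' WHERE cidr='192.168.0.0/24';"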
-
-
-Step 2 - Create Nova certificates
------------------------------------
-
-1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions.
-
-::
-
-  mkdir -p /root/creds
- /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
-
-2. Unzip them in your home directory, and add them to your environment.
-
-::
-
- unzip /root/creds/novacreds.zip -d /root/creds/
- cat /root/creds/novarc >> ~/.bashrc
- source ~/.bashrc
-
-Step 3 - Restart all relevant services
---------------------------------------
-
-Restart all six services in total, just to cover the entire spectrum::
-
-  service libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
-
-Step 4 - Closing steps, and cleaning up
----------------------------------------
-
-One of the most commonly missed configuration steps is allowing proper access to your VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
-
- euca-authorize -P icmp -t -1:-1 default
- euca-authorize -P tcp -p 22 default
-
-Another common issue is being unable to ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
-
- killall dnsmasq
- service nova-network restart
-
-To avoid issues with KVM and permissions with Nova, run the following commands to ensure your VMs run optimally::
-
- chgrp kvm /dev/kvm
- chmod g+rwx /dev/kvm
-
-If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step::
-
- # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773
-
-Testing the Installation
-````````````````````````
-
-You can confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query::
-
- mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'
-
-In return, you should see something similar to this::
-
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
- | created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone |
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
- | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova |
- | 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL | 0 | 2 | osdemo02 | nova-compute | compute | 46056 | 0 | nova |
- | 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL | 0 | 3 | osdemo02 | nova-scheduler | scheduler | 46065 | 0 | nova |
- | 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL | 0 | 4 | osdemo01 | nova-compute | compute | 37050 | 0 | nova |
- | 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova |
- | 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova |
- +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
-
-You can see that 'osdemo0{1,2,4,5}' are all running 'nova-compute'. When you start spinning up instances, they will be scheduled onto any node from this list that is running nova-compute.
-
-You can then use `euca2ools` to test some items::
-
- euca-describe-images
- euca-describe-instances
-
-If you have issues with the API key, you may need to re-source your creds file::
-
- . /root/creds/novarc
-
-If you don’t get any immediate errors, you’re successfully making calls to your cloud!
-
-Spinning up a VM for Testing
-````````````````````````````
-
-(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
-
-The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
-
-UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.
-
-Download the image, and publish to your bucket:
-
-::
-
- image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
- wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
- uec-publish-tarball $image mybucket
-
-This will output three references: an "emi", an "eri", and an "eki" (image, ramdisk, and kernel, respectively). The emi is the one we use to launch instances, so take note of it.
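-
-One simple way to keep track of it for the later steps is to stash it in a shell variable (substituting the actual emi value that was printed for you)::
-
-  emi=<emi-value-from-output>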
-
-Create a keypair to SSH to the server:
-
-::
-
- euca-add-keypair mykey > mykey.priv
-
- chmod 0600 mykey.priv
-
-Boot your instance:
-
-::
-
- euca-run-instances $emi -k mykey -t m1.tiny
-
-($emi is replaced with the output from the previous command)
-
-Checking status, and confirming communication:
-
-Once you have booted the instance, you can check its status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
-
-::
-
- euca-describe-instances
-
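-If you want to script the next step, one hedged way to capture the instance's IP address into the $ipaddress variable used below is (assuming the address appears in the fourth column of the INSTANCE line)::
-
-  ipaddress=$(euca-describe-instances | awk '/INSTANCE/ {print $4; exit}')
-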
-Once it is in a "running" state, you can use your SSH key to connect:
-
-::
-
- ssh -i mykey.priv root@$ipaddress
-
-When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
-
-::
-
- euca-terminate-instances $instance-id
-
-You can determine the instance-id with `euca-describe-instances`; the format is "i-" followed by a series of letters and numbers, e.g. i-a4g9d.
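-
-Similarly, a hedged way to capture that ID for the terminate command (assuming it is the second column of the INSTANCE line)::
-
-  instance_id=$(euca-describe-instances | awk '/INSTANCE/ {print $2; exit}')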
-
-For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages!
-
-Enjoy your new private cloud, and play responsibly!
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst
deleted file mode 100644
index ff43aa90b..000000000
--- a/doc/source/adminguide/single.node.install.rst
+++ /dev/null
@@ -1,362 +0,0 @@
-Installing Nova on a Single Host
-================================
-
-Nova can be run on a single machine, and it is recommended that new users practice managing this type of installation before graduating to multi-node systems.
-
-The fastest way to get a test cloud running is through our :doc:`../quickstart`. But for more detail on installing the system, read this doc.
-
-
-Steps 1 and 2: Get the latest Nova code and system software
----------------------------------------------------------------
-
-Depending on your system, the method for accomplishing this varies.
-
-.. toctree::
- :maxdepth: 1
-
- distros/ubuntu.10.04
- distros/ubuntu.10.10
- distros/others
-
-
-Step 3: Build and install Nova services
----------------------------------------
-
-Switch to the base nova source directory.
-
-Then type or copy/paste in the following lines to build and install the Python code for OpenStack Compute.
-
-::
-
- sudo python setup.py build
- sudo python setup.py install
-
-
-When the installation is complete, you'll see the following lines:
-
-::
-
- Installing nova-network script to /usr/local/bin
- Installing nova-volume script to /usr/local/bin
- Installing nova-objectstore script to /usr/local/bin
- Installing nova-manage script to /usr/local/bin
- Installing nova-scheduler script to /usr/local/bin
- Installing nova-dhcpbridge script to /usr/local/bin
- Installing nova-compute script to /usr/local/bin
- Installing nova-instancemonitor script to /usr/local/bin
- Installing nova-api script to /usr/local/bin
- Installing nova-import-canonical-imagestore script to /usr/local/bin
-
- Installed /usr/local/lib/python2.6/dist-packages/nova-2010.1-py2.6.egg
- Processing dependencies for nova==2010.1
- Finished processing dependencies for nova==2010.1
-
-
-Step 4: Create the Nova Database
---------------------------------
-Type or copy/paste in the following line to create your nova db::
-
- sudo nova-manage db sync
-
-Step 5: Create a Nova administrator
------------------------------------
-Type or copy/paste in the following line to create a user named "anne."::
-
- sudo nova-manage user admin anne
-
-You will see an access key and a secret key export, such as these made-up ones::
-
- export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
- export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
-
-Step 6: Create the network
---------------------------
-
-Type or copy/paste in the following line to create a network prior to creating a project.
-
-::
-
- sudo nova-manage network create 10.0.0.0/8 1 64
-
-For this command, the first argument is your network range in CIDR notation, such as 192.168.1.0/24. The value 1 is the total number of networks you want created, and 64 is the total number of IPs in all networks.
-
-After running this command, entries are made in the 'networks' and 'fixed_ips' tables in the database.
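-
-As another example (a sketch, not needed for this walkthrough), a single /24 network with 256 addresses would be created like this::
-
-  sudo nova-manage network create 192.168.1.0/24 1 256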
-
-Step 7: Create a project with the user you created
---------------------------------------------------
-Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
-
-::
-
- sudo nova-manage project create IRT anne
-
-::
-
- Generating RSA private key, 1024 bit long modulus
- .....++++++
- ..++++++
- e is 65537 (0x10001)
- Using configuration from ./openssl.cnf
- Check that the request matches the signature
- Signature ok
- The Subject's Distinguished Name is as follows
- countryName :PRINTABLE:'US'
- stateOrProvinceName :PRINTABLE:'California'
- localityName :PRINTABLE:'MountainView'
- organizationName :PRINTABLE:'AnsoLabs'
- organizationalUnitName:PRINTABLE:'NovaDev'
- commonName :PRINTABLE:'anne-2010-10-12T21:12:35Z'
- Certificate is to be certified until Oct 12 21:12:35 2011 GMT (365 days)
-
- Write out database with 1 new entries
- Data Base Updated
-
-
-Step 8: Unzip the nova.zip
---------------------------
-
-You should have a nova.zip file in your current working directory. Unzip it with this command:
-
-::
-
- unzip nova.zip
-
-
-You'll see these files extracted.
-
-::
-
- Archive: nova.zip
- extracting: novarc
- extracting: pk.pem
- extracting: cert.pem
- extracting: nova-vpn.conf
- extracting: cacert.pem
-
-
-Step 9: Source the rc file
---------------------------
-Type or copy/paste the following to source the novarc file in your current working directory.
-
-::
-
- . novarc
-
-
-Step 10: Pat yourself on the back :)
------------------------------------
-Congratulations! Your cloud is up and running: you've created an admin user, created a network, retrieved the user's credentials, and put them in your environment.
-
-Now you need an image.
-
-
-Step 11: Get an image
---------------------
-To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server.
-
-::
-
- wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
-
-
-::
-
- --2010-10-12 21:40:55-- http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
- Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
- Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
- HTTP request sent, awaiting response... 200 OK
- Length: 58520278 (56M) [application/x-gzip]
- Saving to: `images.tgz'
-
- 100%[======================================>] 58,520,278 14.1M/s in 3.9s
-
- 2010-10-12 21:40:59 (14.1 MB/s) - `images.tgz' saved [58520278/58520278]
-
-
-
-Step 12: Decompress the image file
-----------------------------------
-Use this command to extract the image files::
-
- tar xvzf images.tgz
-
-You get a directory listing like so::
-
- images
- |-- aki-lucid
- | |-- image
- | `-- info.json
- |-- ami-tiny
- | |-- image
- | `-- info.json
- `-- ari-lucid
- |-- image
- `-- info.json
-
-Step 13: Send commands to upload sample image to the cloud
-----------------------------------------------------------
-
-Type or copy/paste the following commands to create a manifest for the kernel.::
-
- euca-bundle-image -i images/aki-lucid/image -p kernel --kernel true
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: kernel.part.0
- Generating manifest /tmp/kernel.manifest.xml
-
-Type or copy/paste the following commands to create a manifest for the ramdisk.::
-
- euca-bundle-image -i images/ari-lucid/image -p ramdisk --ramdisk true
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: ramdisk.part.0
- Generating manifest /tmp/ramdisk.manifest.xml
-
-Type or copy/paste the following commands to upload the kernel bundle.::
-
- euca-upload-bundle -m /tmp/kernel.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Creating bucket: mybucket
- Uploading manifest file
- Uploading part: kernel.part.0
- Uploaded image as mybucket/kernel.manifest.xml
-
-Type or copy/paste the following commands to upload the ramdisk bundle.::
-
- euca-upload-bundle -m /tmp/ramdisk.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Uploading manifest file
- Uploading part: ramdisk.part.0
- Uploaded image as mybucket/ramdisk.manifest.xml
-
-Type or copy/paste the following commands to register the kernel and get its ID.::
-
- euca-register mybucket/kernel.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-fcbj2non
-
-Type or copy/paste the following commands to register the ramdisk and get its ID.::
-
- euca-register mybucket/ramdisk.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-orukptrc
-
-Type or copy/paste the following commands to create a manifest for the machine image associated with the ramdisk and kernel IDs that you got from the previous commands.::
-
- euca-bundle-image -i images/ami-tiny/image -p machine --kernel ami-fcbj2non --ramdisk ami-orukptrc
-
-You should see this in response::
-
- Checking image
- Tarring image
- Encrypting image
- Splitting image...
- Part: machine.part.0
- Part: machine.part.1
- Part: machine.part.2
- Part: machine.part.3
- Part: machine.part.4
- Generating manifest /tmp/machine.manifest.xml
-
-Type or copy/paste the following commands to upload the machine image bundle.::
-
- euca-upload-bundle -m /tmp/machine.manifest.xml -b mybucket
-
-You should see this in response::
-
- Checking bucket: mybucket
- Uploading manifest file
- Uploading part: machine.part.0
- Uploading part: machine.part.1
- Uploading part: machine.part.2
- Uploading part: machine.part.3
- Uploading part: machine.part.4
- Uploaded image as mybucket/machine.manifest.xml
-
-Type or copy/paste the following commands to register the machine image and get its ID.::
-
- euca-register mybucket/machine.manifest.xml
-
-You should see this in response::
-
- IMAGE ami-g06qbntt
-
-Type or copy/paste the following commands to register an SSH keypair for use in starting and accessing the instances.::
-
- euca-add-keypair mykey > mykey.priv
- chmod 600 mykey.priv
-
-Type or copy/paste the following commands to run an instance using the keypair and IDs that we previously created.::
-
- euca-run-instances ami-g06qbntt --kernel ami-fcbj2non --ramdisk ami-orukptrc -k mykey
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 scheduling mykey (IRT, None) m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following commands to watch as the scheduler launches your instance and it begins booting.::
-
- euca-describe-instances
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 launching mykey (IRT, cloud02) m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following commands to see when loading is completed and the instance is running.::
-
- euca-describe-instances
-
-You should see this in response::
-
- RESERVATION r-0at28z12 IRT
- INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 running mykey (IRT, cloud02) 0 m1.small 2010-10-18 19:02:10.443599
-
-Type or copy/paste the following commands to check that the virtual machine is running.::
-
- virsh list
-
-You should see this in response::
-
- Id Name State
- ----------------------------------
- 1 2842445831 running
-
-Type or copy/paste the following commands to ssh to the instance using your private key.::
-
- ssh -i mykey.priv root@10.0.0.3
-
-
-Troubleshooting Installation
-----------------------------
-
-If you see an "error loading the config file './openssl.cnf'", copy the openssl.cnf file to the location where Nova expects it, reboot, and then try the command again.
-
-::
-
- cp /etc/ssl/openssl.cnf ~
- sudo reboot
-
-
-
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
new file mode 100644
index 000000000..329a465db
--- /dev/null
+++ b/doc/source/api/autoindex.rst
@@ -0,0 +1,144 @@
+.. toctree::
+ :maxdepth: 1
+
+ nova..adminclient.rst
+ nova..api.direct.rst
+ nova..api.ec2.admin.rst
+ nova..api.ec2.apirequest.rst
+ nova..api.ec2.cloud.rst
+ nova..api.ec2.metadatarequesthandler.rst
+ nova..api.openstack.auth.rst
+ nova..api.openstack.backup_schedules.rst
+ nova..api.openstack.common.rst
+ nova..api.openstack.consoles.rst
+ nova..api.openstack.faults.rst
+ nova..api.openstack.flavors.rst
+ nova..api.openstack.images.rst
+ nova..api.openstack.servers.rst
+ nova..api.openstack.shared_ip_groups.rst
+ nova..api.openstack.zones.rst
+ nova..auth.dbdriver.rst
+ nova..auth.fakeldap.rst
+ nova..auth.ldapdriver.rst
+ nova..auth.manager.rst
+ nova..auth.signer.rst
+ nova..cloudpipe.pipelib.rst
+ nova..compute.api.rst
+ nova..compute.instance_types.rst
+ nova..compute.manager.rst
+ nova..compute.monitor.rst
+ nova..compute.power_state.rst
+ nova..console.api.rst
+ nova..console.fake.rst
+ nova..console.manager.rst
+ nova..console.xvp.rst
+ nova..context.rst
+ nova..crypto.rst
+ nova..db.api.rst
+ nova..db.base.rst
+ nova..db.migration.rst
+ nova..db.sqlalchemy.api.rst
+ nova..db.sqlalchemy.migrate_repo.manage.rst
+ nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
+ nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
+ nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
+ nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
+ nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
+ nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
+ nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
+ nova..db.sqlalchemy.migration.rst
+ nova..db.sqlalchemy.models.rst
+ nova..db.sqlalchemy.session.rst
+ nova..exception.rst
+ nova..fakememcache.rst
+ nova..fakerabbit.rst
+ nova..flags.rst
+ nova..image.glance.rst
+ nova..image.local.rst
+ nova..image.s3.rst
+ nova..image.service.rst
+ nova..log.rst
+ nova..manager.rst
+ nova..network.api.rst
+ nova..network.linux_net.rst
+ nova..network.manager.rst
+ nova..objectstore.bucket.rst
+ nova..objectstore.handler.rst
+ nova..objectstore.image.rst
+ nova..objectstore.stored.rst
+ nova..quota.rst
+ nova..rpc.rst
+ nova..scheduler.chance.rst
+ nova..scheduler.driver.rst
+ nova..scheduler.manager.rst
+ nova..scheduler.simple.rst
+ nova..scheduler.zone.rst
+ nova..service.rst
+ nova..test.rst
+ nova..tests.api.openstack.fakes.rst
+ nova..tests.api.openstack.test_adminapi.rst
+ nova..tests.api.openstack.test_api.rst
+ nova..tests.api.openstack.test_auth.rst
+ nova..tests.api.openstack.test_common.rst
+ nova..tests.api.openstack.test_faults.rst
+ nova..tests.api.openstack.test_flavors.rst
+ nova..tests.api.openstack.test_images.rst
+ nova..tests.api.openstack.test_ratelimiting.rst
+ nova..tests.api.openstack.test_servers.rst
+ nova..tests.api.openstack.test_shared_ip_groups.rst
+ nova..tests.api.openstack.test_zones.rst
+ nova..tests.api.test_wsgi.rst
+ nova..tests.db.fakes.rst
+ nova..tests.declare_flags.rst
+ nova..tests.fake_flags.rst
+ nova..tests.glance.stubs.rst
+ nova..tests.hyperv_unittest.rst
+ nova..tests.objectstore_unittest.rst
+ nova..tests.real_flags.rst
+ nova..tests.runtime_flags.rst
+ nova..tests.test_access.rst
+ nova..tests.test_api.rst
+ nova..tests.test_auth.rst
+ nova..tests.test_cloud.rst
+ nova..tests.test_compute.rst
+ nova..tests.test_console.rst
+ nova..tests.test_direct.rst
+ nova..tests.test_flags.rst
+ nova..tests.test_instance_types.rst
+ nova..tests.test_localization.rst
+ nova..tests.test_log.rst
+ nova..tests.test_middleware.rst
+ nova..tests.test_misc.rst
+ nova..tests.test_network.rst
+ nova..tests.test_quota.rst
+ nova..tests.test_rpc.rst
+ nova..tests.test_scheduler.rst
+ nova..tests.test_service.rst
+ nova..tests.test_test.rst
+ nova..tests.test_twistd.rst
+ nova..tests.test_utils.rst
+ nova..tests.test_virt.rst
+ nova..tests.test_volume.rst
+ nova..tests.test_xenapi.rst
+ nova..tests.xenapi.stubs.rst
+ nova..twistd.rst
+ nova..utils.rst
+ nova..version.rst
+ nova..virt.connection.rst
+ nova..virt.disk.rst
+ nova..virt.fake.rst
+ nova..virt.hyperv.rst
+ nova..virt.images.rst
+ nova..virt.libvirt_conn.rst
+ nova..virt.xenapi.fake.rst
+ nova..virt.xenapi.network_utils.rst
+ nova..virt.xenapi.vm_utils.rst
+ nova..virt.xenapi.vmops.rst
+ nova..virt.xenapi.volume_utils.rst
+ nova..virt.xenapi.volumeops.rst
+ nova..virt.xenapi_conn.rst
+ nova..volume.api.rst
+ nova..volume.driver.rst
+ nova..volume.manager.rst
+ nova..volume.san.rst
+ nova..wsgi.rst
diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst
new file mode 100644
index 000000000..35fa839e1
--- /dev/null
+++ b/doc/source/api/nova..adminclient.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..adminclient` Module
+==============================================================================
+.. automodule:: nova..adminclient
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.direct.rst b/doc/source/api/nova..api.direct.rst
new file mode 100644
index 000000000..a1705c707
--- /dev/null
+++ b/doc/source/api/nova..api.direct.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.direct` Module
+==============================================================================
+.. automodule:: nova..api.direct
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst
new file mode 100644
index 000000000..4e9ab308b
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.admin.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.admin` Module
+==============================================================================
+.. automodule:: nova..api.ec2.admin
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst
new file mode 100644
index 000000000..c17a2ff3a
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.apirequest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.apirequest` Module
+==============================================================================
+.. automodule:: nova..api.ec2.apirequest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst
new file mode 100644
index 000000000..f6145c217
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.cloud.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.cloud` Module
+==============================================================================
+.. automodule:: nova..api.ec2.cloud
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
new file mode 100644
index 000000000..75f5169e5
--- /dev/null
+++ b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.ec2.metadatarequesthandler` Module
+==============================================================================
+.. automodule:: nova..api.ec2.metadatarequesthandler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst
new file mode 100644
index 000000000..8c3f8f2da
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.auth` Module
+==============================================================================
+.. automodule:: nova..api.openstack.auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst
new file mode 100644
index 000000000..6b406f12d
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.backup_schedules.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.backup_schedules` Module
+==============================================================================
+.. automodule:: nova..api.openstack.backup_schedules
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.common.rst b/doc/source/api/nova..api.openstack.common.rst
new file mode 100644
index 000000000..4fd734790
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.common.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.common` Module
+==============================================================================
+.. automodule:: nova..api.openstack.common
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.consoles.rst b/doc/source/api/nova..api.openstack.consoles.rst
new file mode 100644
index 000000000..1e3e09599
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.consoles.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.consoles` Module
+==============================================================================
+.. automodule:: nova..api.openstack.consoles
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst
new file mode 100644
index 000000000..7b25561f7
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.faults.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.faults` Module
+==============================================================================
+.. automodule:: nova..api.openstack.faults
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst
new file mode 100644
index 000000000..0deb724de
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.flavors.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.flavors` Module
+==============================================================================
+.. automodule:: nova..api.openstack.flavors
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst
new file mode 100644
index 000000000..82bd5f1e8
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.images` Module
+==============================================================================
+.. automodule:: nova..api.openstack.images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst
new file mode 100644
index 000000000..c36856ea2
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.servers.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.servers` Module
+==============================================================================
+.. automodule:: nova..api.openstack.servers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.shared_ip_groups.rst b/doc/source/api/nova..api.openstack.shared_ip_groups.rst
new file mode 100644
index 000000000..4b1f44efe
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.shared_ip_groups.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.shared_ip_groups` Module
+==============================================================================
+.. automodule:: nova..api.openstack.shared_ip_groups
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.zones.rst b/doc/source/api/nova..api.openstack.zones.rst
new file mode 100644
index 000000000..ebe4569c5
--- /dev/null
+++ b/doc/source/api/nova..api.openstack.zones.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..api.openstack.zones` Module
+==============================================================================
+.. automodule:: nova..api.openstack.zones
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst
new file mode 100644
index 000000000..7de68b6e0
--- /dev/null
+++ b/doc/source/api/nova..auth.dbdriver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.dbdriver` Module
+==============================================================================
+.. automodule:: nova..auth.dbdriver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst
new file mode 100644
index 000000000..ca8a3ad4d
--- /dev/null
+++ b/doc/source/api/nova..auth.fakeldap.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.fakeldap` Module
+==============================================================================
+.. automodule:: nova..auth.fakeldap
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst
new file mode 100644
index 000000000..c44463522
--- /dev/null
+++ b/doc/source/api/nova..auth.ldapdriver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.ldapdriver` Module
+==============================================================================
+.. automodule:: nova..auth.ldapdriver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst
new file mode 100644
index 000000000..bc5ce2ec3
--- /dev/null
+++ b/doc/source/api/nova..auth.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.manager` Module
+==============================================================================
+.. automodule:: nova..auth.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst
new file mode 100644
index 000000000..aad824ead
--- /dev/null
+++ b/doc/source/api/nova..auth.signer.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..auth.signer` Module
+==============================================================================
+.. automodule:: nova..auth.signer
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst
new file mode 100644
index 000000000..054aaf484
--- /dev/null
+++ b/doc/source/api/nova..cloudpipe.pipelib.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..cloudpipe.pipelib` Module
+==============================================================================
+.. automodule:: nova..cloudpipe.pipelib
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.api.rst b/doc/source/api/nova..compute.api.rst
new file mode 100644
index 000000000..caa66313a
--- /dev/null
+++ b/doc/source/api/nova..compute.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.api` Module
+==============================================================================
+.. automodule:: nova..compute.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst
new file mode 100644
index 000000000..d206ff3a4
--- /dev/null
+++ b/doc/source/api/nova..compute.instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.instance_types` Module
+==============================================================================
+.. automodule:: nova..compute.instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst
new file mode 100644
index 000000000..33a337c39
--- /dev/null
+++ b/doc/source/api/nova..compute.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.manager` Module
+==============================================================================
+.. automodule:: nova..compute.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst
new file mode 100644
index 000000000..a91169ecd
--- /dev/null
+++ b/doc/source/api/nova..compute.monitor.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.monitor` Module
+==============================================================================
+.. automodule:: nova..compute.monitor
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst
new file mode 100644
index 000000000..41b1080e5
--- /dev/null
+++ b/doc/source/api/nova..compute.power_state.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..compute.power_state` Module
+==============================================================================
+.. automodule:: nova..compute.power_state
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.api.rst b/doc/source/api/nova..console.api.rst
new file mode 100644
index 000000000..82a51d4c7
--- /dev/null
+++ b/doc/source/api/nova..console.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.api` Module
+==============================================================================
+.. automodule:: nova..console.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.fake.rst b/doc/source/api/nova..console.fake.rst
new file mode 100644
index 000000000..f053f85d6
--- /dev/null
+++ b/doc/source/api/nova..console.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.fake` Module
+==============================================================================
+.. automodule:: nova..console.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.manager.rst b/doc/source/api/nova..console.manager.rst
new file mode 100644
index 000000000..f9283a6c3
--- /dev/null
+++ b/doc/source/api/nova..console.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.manager` Module
+==============================================================================
+.. automodule:: nova..console.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..console.xvp.rst b/doc/source/api/nova..console.xvp.rst
new file mode 100644
index 000000000..a0887009e
--- /dev/null
+++ b/doc/source/api/nova..console.xvp.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..console.xvp` Module
+==============================================================================
+.. automodule:: nova..console.xvp
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst
new file mode 100644
index 000000000..9de1adb24
--- /dev/null
+++ b/doc/source/api/nova..context.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..context` Module
+==============================================================================
+.. automodule:: nova..context
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst
new file mode 100644
index 000000000..af9f63634
--- /dev/null
+++ b/doc/source/api/nova..crypto.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..crypto` Module
+==============================================================================
+.. automodule:: nova..crypto
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst
new file mode 100644
index 000000000..6d998fbb2
--- /dev/null
+++ b/doc/source/api/nova..db.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.api` Module
+==============================================================================
+.. automodule:: nova..db.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.base.rst b/doc/source/api/nova..db.base.rst
new file mode 100644
index 000000000..29fb417d6
--- /dev/null
+++ b/doc/source/api/nova..db.base.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.base` Module
+==============================================================================
+.. automodule:: nova..db.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.migration.rst b/doc/source/api/nova..db.migration.rst
new file mode 100644
index 000000000..71dfea301
--- /dev/null
+++ b/doc/source/api/nova..db.migration.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.migration` Module
+==============================================================================
+.. automodule:: nova..db.migration
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst
new file mode 100644
index 000000000..76d0c1bd3
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.api` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
new file mode 100644
index 000000000..93decfb27
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.manage` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.manage
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
new file mode 100644
index 000000000..4b1219edb
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.001_austin` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.001_austin
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
new file mode 100644
index 000000000..82f1f4680
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.002_bexar` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.002_bexar
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
new file mode 100644
index 000000000..98f3e8da7
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks` Module
+====================================================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
new file mode 100644
index 000000000..5cbb81191
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables` Module
+====================================================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
new file mode 100644
index 000000000..cef0c243e
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata` Module
+====================================================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
new file mode 100644
index 000000000..a15697196
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes` Module
+====================================================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
new file mode 100644
index 000000000..38842d1af
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types` Module
+====================================================================================================
+.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.migration.rst b/doc/source/api/nova..db.sqlalchemy.migration.rst
new file mode 100644
index 000000000..3a9b01b9a
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.migration.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.migration` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.migration
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst
new file mode 100644
index 000000000..9c795d7f5
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.models.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.models` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.models
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst
new file mode 100644
index 000000000..cbfd6416a
--- /dev/null
+++ b/doc/source/api/nova..db.sqlalchemy.session.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..db.sqlalchemy.session` Module
+==============================================================================
+.. automodule:: nova..db.sqlalchemy.session
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst
new file mode 100644
index 000000000..97ac6b752
--- /dev/null
+++ b/doc/source/api/nova..exception.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..exception` Module
+==============================================================================
+.. automodule:: nova..exception
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..fakememcache.rst b/doc/source/api/nova..fakememcache.rst
new file mode 100644
index 000000000..7e7ffb98b
--- /dev/null
+++ b/doc/source/api/nova..fakememcache.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..fakememcache` Module
+==============================================================================
+.. automodule:: nova..fakememcache
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst
new file mode 100644
index 000000000..f1e27c266
--- /dev/null
+++ b/doc/source/api/nova..fakerabbit.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..fakerabbit` Module
+==============================================================================
+.. automodule:: nova..fakerabbit
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst
new file mode 100644
index 000000000..08165be44
--- /dev/null
+++ b/doc/source/api/nova..flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..flags` Module
+==============================================================================
+.. automodule:: nova..flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.glance.rst b/doc/source/api/nova..image.glance.rst
new file mode 100644
index 000000000..b0882d5ec
--- /dev/null
+++ b/doc/source/api/nova..image.glance.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.glance` Module
+==============================================================================
+.. automodule:: nova..image.glance
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.local.rst b/doc/source/api/nova..image.local.rst
new file mode 100644
index 000000000..b6ad5470b
--- /dev/null
+++ b/doc/source/api/nova..image.local.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.local` Module
+==============================================================================
+.. automodule:: nova..image.local
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.s3.rst b/doc/source/api/nova..image.s3.rst
new file mode 100644
index 000000000..e5b236127
--- /dev/null
+++ b/doc/source/api/nova..image.s3.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.s3` Module
+==============================================================================
+.. automodule:: nova..image.s3
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst
new file mode 100644
index 000000000..78ef1ecca
--- /dev/null
+++ b/doc/source/api/nova..image.service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..image.service` Module
+==============================================================================
+.. automodule:: nova..image.service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..log.rst b/doc/source/api/nova..log.rst
new file mode 100644
index 000000000..ff209709f
--- /dev/null
+++ b/doc/source/api/nova..log.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..log` Module
+==============================================================================
+.. automodule:: nova..log
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst
new file mode 100644
index 000000000..576902491
--- /dev/null
+++ b/doc/source/api/nova..manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..manager` Module
+==============================================================================
+.. automodule:: nova..manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.api.rst b/doc/source/api/nova..network.api.rst
new file mode 100644
index 000000000..b63be2ba3
--- /dev/null
+++ b/doc/source/api/nova..network.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.api` Module
+==============================================================================
+.. automodule:: nova..network.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst
new file mode 100644
index 000000000..7af78d5ad
--- /dev/null
+++ b/doc/source/api/nova..network.linux_net.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.linux_net` Module
+==============================================================================
+.. automodule:: nova..network.linux_net
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst
new file mode 100644
index 000000000..0ea705533
--- /dev/null
+++ b/doc/source/api/nova..network.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..network.manager` Module
+==============================================================================
+.. automodule:: nova..network.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst
new file mode 100644
index 000000000..3bfdf639c
--- /dev/null
+++ b/doc/source/api/nova..objectstore.bucket.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.bucket` Module
+==============================================================================
+.. automodule:: nova..objectstore.bucket
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst
new file mode 100644
index 000000000..0eb8c4efb
--- /dev/null
+++ b/doc/source/api/nova..objectstore.handler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.handler` Module
+==============================================================================
+.. automodule:: nova..objectstore.handler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst
new file mode 100644
index 000000000..fa4c971f1
--- /dev/null
+++ b/doc/source/api/nova..objectstore.image.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.image` Module
+==============================================================================
+.. automodule:: nova..objectstore.image
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst
new file mode 100644
index 000000000..2b1d997a3
--- /dev/null
+++ b/doc/source/api/nova..objectstore.stored.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..objectstore.stored` Module
+==============================================================================
+.. automodule:: nova..objectstore.stored
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst
new file mode 100644
index 000000000..4140d95d6
--- /dev/null
+++ b/doc/source/api/nova..quota.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..quota` Module
+==============================================================================
+.. automodule:: nova..quota
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst
new file mode 100644
index 000000000..5b2a9b8e2
--- /dev/null
+++ b/doc/source/api/nova..rpc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..rpc` Module
+==============================================================================
+.. automodule:: nova..rpc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst
new file mode 100644
index 000000000..89c074c8f
--- /dev/null
+++ b/doc/source/api/nova..scheduler.chance.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.chance` Module
+==============================================================================
+.. automodule:: nova..scheduler.chance
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst
new file mode 100644
index 000000000..793ed9c7b
--- /dev/null
+++ b/doc/source/api/nova..scheduler.driver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.driver` Module
+==============================================================================
+.. automodule:: nova..scheduler.driver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst
new file mode 100644
index 000000000..d0fc7c423
--- /dev/null
+++ b/doc/source/api/nova..scheduler.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.manager` Module
+==============================================================================
+.. automodule:: nova..scheduler.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst
new file mode 100644
index 000000000..dacc2cf30
--- /dev/null
+++ b/doc/source/api/nova..scheduler.simple.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.simple` Module
+==============================================================================
+.. automodule:: nova..scheduler.simple
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.zone.rst b/doc/source/api/nova..scheduler.zone.rst
new file mode 100644
index 000000000..54c4bf201
--- /dev/null
+++ b/doc/source/api/nova..scheduler.zone.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..scheduler.zone` Module
+==============================================================================
+.. automodule:: nova..scheduler.zone
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst
new file mode 100644
index 000000000..2d2dfcf2e
--- /dev/null
+++ b/doc/source/api/nova..service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..service` Module
+==============================================================================
+.. automodule:: nova..service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst
new file mode 100644
index 000000000..a6bdb6f1f
--- /dev/null
+++ b/doc/source/api/nova..test.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..test` Module
+==============================================================================
+.. automodule:: nova..test
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst
new file mode 100644
index 000000000..4a9ff5938
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.fakes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.fakes` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.fakes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_adminapi.rst b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
new file mode 100644
index 000000000..19a85ca0f
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_adminapi` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_adminapi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst
new file mode 100644
index 000000000..68106d221
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_api` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst
new file mode 100644
index 000000000..9f0011669
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_auth` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_common.rst b/doc/source/api/nova..tests.api.openstack.test_common.rst
new file mode 100644
index 000000000..82f40ecb8
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_common.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_common` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_common
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst
new file mode 100644
index 000000000..b839ae8a3
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_faults.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_faults` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_faults
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
new file mode 100644
index 000000000..471fac56e
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_flavors` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_flavors
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst
new file mode 100644
index 000000000..57ae93c8c
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_images` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
new file mode 100644
index 000000000..9a857f795
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_ratelimiting` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_ratelimiting
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst
new file mode 100644
index 000000000..ea602e6ab
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_servers.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_servers` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_servers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
new file mode 100644
index 000000000..48814af00
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_shared_ip_groups` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_shared_ip_groups
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_zones.rst b/doc/source/api/nova..tests.api.openstack.test_zones.rst
new file mode 100644
index 000000000..ba7078e63
--- /dev/null
+++ b/doc/source/api/nova..tests.api.openstack.test_zones.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.openstack.test_zones` Module
+==============================================================================
+.. automodule:: nova..tests.api.openstack.test_zones
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst
new file mode 100644
index 000000000..8e79caa4d
--- /dev/null
+++ b/doc/source/api/nova..tests.api.test_wsgi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.api.test_wsgi` Module
+==============================================================================
+.. automodule:: nova..tests.api.test_wsgi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.db.fakes.rst b/doc/source/api/nova..tests.db.fakes.rst
new file mode 100644
index 000000000..cc79e55e2
--- /dev/null
+++ b/doc/source/api/nova..tests.db.fakes.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.db.fakes` Module
+==============================================================================
+.. automodule:: nova..tests.db.fakes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst
new file mode 100644
index 000000000..524e72e91
--- /dev/null
+++ b/doc/source/api/nova..tests.declare_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.declare_flags` Module
+==============================================================================
+.. automodule:: nova..tests.declare_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst
new file mode 100644
index 000000000..a8dc3df36
--- /dev/null
+++ b/doc/source/api/nova..tests.fake_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.fake_flags` Module
+==============================================================================
+.. automodule:: nova..tests.fake_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.glance.stubs.rst b/doc/source/api/nova..tests.glance.stubs.rst
new file mode 100644
index 000000000..7ef5fccbe
--- /dev/null
+++ b/doc/source/api/nova..tests.glance.stubs.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.glance.stubs` Module
+==============================================================================
+.. automodule:: nova..tests.glance.stubs
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.hyperv_unittest.rst b/doc/source/api/nova..tests.hyperv_unittest.rst
new file mode 100644
index 000000000..c08443121
--- /dev/null
+++ b/doc/source/api/nova..tests.hyperv_unittest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.hyperv_unittest` Module
+==============================================================================
+.. automodule:: nova..tests.hyperv_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst
new file mode 100644
index 000000000..0ae252f04
--- /dev/null
+++ b/doc/source/api/nova..tests.objectstore_unittest.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.objectstore_unittest` Module
+==============================================================================
+.. automodule:: nova..tests.objectstore_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst
new file mode 100644
index 000000000..e9c0d1abd
--- /dev/null
+++ b/doc/source/api/nova..tests.real_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.real_flags` Module
+==============================================================================
+.. automodule:: nova..tests.real_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst
new file mode 100644
index 000000000..984e21199
--- /dev/null
+++ b/doc/source/api/nova..tests.runtime_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.runtime_flags` Module
+==============================================================================
+.. automodule:: nova..tests.runtime_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_access.rst b/doc/source/api/nova..tests.test_access.rst
new file mode 100644
index 000000000..300d8109e
--- /dev/null
+++ b/doc/source/api/nova..tests.test_access.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_access` Module
+==============================================================================
+.. automodule:: nova..tests.test_access
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_api.rst b/doc/source/api/nova..tests.test_api.rst
new file mode 100644
index 000000000..f9473062e
--- /dev/null
+++ b/doc/source/api/nova..tests.test_api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_api` Module
+==============================================================================
+.. automodule:: nova..tests.test_api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_auth.rst b/doc/source/api/nova..tests.test_auth.rst
new file mode 100644
index 000000000..ff4445ae4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_auth.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_auth` Module
+==============================================================================
+.. automodule:: nova..tests.test_auth
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_cloud.rst b/doc/source/api/nova..tests.test_cloud.rst
new file mode 100644
index 000000000..7bd03db9a
--- /dev/null
+++ b/doc/source/api/nova..tests.test_cloud.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_cloud` Module
+==============================================================================
+.. automodule:: nova..tests.test_cloud
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_compute.rst b/doc/source/api/nova..tests.test_compute.rst
new file mode 100644
index 000000000..90fd6e9d1
--- /dev/null
+++ b/doc/source/api/nova..tests.test_compute.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_compute` Module
+==============================================================================
+.. automodule:: nova..tests.test_compute
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_console.rst b/doc/source/api/nova..tests.test_console.rst
new file mode 100644
index 000000000..f695f5d17
--- /dev/null
+++ b/doc/source/api/nova..tests.test_console.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_console` Module
+==============================================================================
+.. automodule:: nova..tests.test_console
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_direct.rst b/doc/source/api/nova..tests.test_direct.rst
new file mode 100644
index 000000000..4f7adef19
--- /dev/null
+++ b/doc/source/api/nova..tests.test_direct.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_direct` Module
+==============================================================================
+.. automodule:: nova..tests.test_direct
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_flags.rst b/doc/source/api/nova..tests.test_flags.rst
new file mode 100644
index 000000000..2ec35d6c2
--- /dev/null
+++ b/doc/source/api/nova..tests.test_flags.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_flags` Module
+==============================================================================
+.. automodule:: nova..tests.test_flags
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_instance_types.rst b/doc/source/api/nova..tests.test_instance_types.rst
new file mode 100644
index 000000000..ebe689966
--- /dev/null
+++ b/doc/source/api/nova..tests.test_instance_types.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_instance_types` Module
+==============================================================================
+.. automodule:: nova..tests.test_instance_types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_localization.rst b/doc/source/api/nova..tests.test_localization.rst
new file mode 100644
index 000000000..d93c83ba7
--- /dev/null
+++ b/doc/source/api/nova..tests.test_localization.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_localization` Module
+==============================================================================
+.. automodule:: nova..tests.test_localization
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_log.rst b/doc/source/api/nova..tests.test_log.rst
new file mode 100644
index 000000000..04ff5ead1
--- /dev/null
+++ b/doc/source/api/nova..tests.test_log.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_log` Module
+==============================================================================
+.. automodule:: nova..tests.test_log
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_middleware.rst b/doc/source/api/nova..tests.test_middleware.rst
new file mode 100644
index 000000000..2f9df5832
--- /dev/null
+++ b/doc/source/api/nova..tests.test_middleware.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_middleware` Module
+==============================================================================
+.. automodule:: nova..tests.test_middleware
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_misc.rst b/doc/source/api/nova..tests.test_misc.rst
new file mode 100644
index 000000000..4975f89d7
--- /dev/null
+++ b/doc/source/api/nova..tests.test_misc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_misc` Module
+==============================================================================
+.. automodule:: nova..tests.test_misc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_network.rst b/doc/source/api/nova..tests.test_network.rst
new file mode 100644
index 000000000..3a4b04ea4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_network.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_network` Module
+==============================================================================
+.. automodule:: nova..tests.test_network
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_quota.rst b/doc/source/api/nova..tests.test_quota.rst
new file mode 100644
index 000000000..24ebf9ca3
--- /dev/null
+++ b/doc/source/api/nova..tests.test_quota.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_quota` Module
+==============================================================================
+.. automodule:: nova..tests.test_quota
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_rpc.rst b/doc/source/api/nova..tests.test_rpc.rst
new file mode 100644
index 000000000..c141d6889
--- /dev/null
+++ b/doc/source/api/nova..tests.test_rpc.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_rpc` Module
+==============================================================================
+.. automodule:: nova..tests.test_rpc
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_scheduler.rst b/doc/source/api/nova..tests.test_scheduler.rst
new file mode 100644
index 000000000..1cd9991db
--- /dev/null
+++ b/doc/source/api/nova..tests.test_scheduler.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_scheduler` Module
+==============================================================================
+.. automodule:: nova..tests.test_scheduler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_service.rst b/doc/source/api/nova..tests.test_service.rst
new file mode 100644
index 000000000..a264fbb55
--- /dev/null
+++ b/doc/source/api/nova..tests.test_service.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_service` Module
+==============================================================================
+.. automodule:: nova..tests.test_service
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_test.rst b/doc/source/api/nova..tests.test_test.rst
new file mode 100644
index 000000000..389eb3c99
--- /dev/null
+++ b/doc/source/api/nova..tests.test_test.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_test` Module
+==============================================================================
+.. automodule:: nova..tests.test_test
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst
new file mode 100644
index 000000000..cae0c0a28
--- /dev/null
+++ b/doc/source/api/nova..tests.test_twistd.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_twistd` Module
+==============================================================================
+.. automodule:: nova..tests.test_twistd
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_utils.rst b/doc/source/api/nova..tests.test_utils.rst
new file mode 100644
index 000000000..d61a7021f
--- /dev/null
+++ b/doc/source/api/nova..tests.test_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_utils` Module
+==============================================================================
+.. automodule:: nova..tests.test_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_virt.rst b/doc/source/api/nova..tests.test_virt.rst
new file mode 100644
index 000000000..9b0dc1e46
--- /dev/null
+++ b/doc/source/api/nova..tests.test_virt.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_virt` Module
+==============================================================================
+.. automodule:: nova..tests.test_virt
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_volume.rst b/doc/source/api/nova..tests.test_volume.rst
new file mode 100644
index 000000000..b5affe53c
--- /dev/null
+++ b/doc/source/api/nova..tests.test_volume.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_volume` Module
+==============================================================================
+.. automodule:: nova..tests.test_volume
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_xenapi.rst b/doc/source/api/nova..tests.test_xenapi.rst
new file mode 100644
index 000000000..7128baee4
--- /dev/null
+++ b/doc/source/api/nova..tests.test_xenapi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.test_xenapi` Module
+==============================================================================
+.. automodule:: nova..tests.test_xenapi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..tests.xenapi.stubs.rst b/doc/source/api/nova..tests.xenapi.stubs.rst
new file mode 100644
index 000000000..356eed9a7
--- /dev/null
+++ b/doc/source/api/nova..tests.xenapi.stubs.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..tests.xenapi.stubs` Module
+==============================================================================
+.. automodule:: nova..tests.xenapi.stubs
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst
new file mode 100644
index 000000000..d4145396d
--- /dev/null
+++ b/doc/source/api/nova..twistd.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..twistd` Module
+==============================================================================
+.. automodule:: nova..twistd
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst
new file mode 100644
index 000000000..1131d1080
--- /dev/null
+++ b/doc/source/api/nova..utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..utils` Module
+==============================================================================
+.. automodule:: nova..utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..version.rst b/doc/source/api/nova..version.rst
new file mode 100644
index 000000000..4b0fc078f
--- /dev/null
+++ b/doc/source/api/nova..version.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..version` Module
+==============================================================================
+.. automodule:: nova..version
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst
new file mode 100644
index 000000000..caf766765
--- /dev/null
+++ b/doc/source/api/nova..virt.connection.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.connection` Module
+==============================================================================
+.. automodule:: nova..virt.connection
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.disk.rst b/doc/source/api/nova..virt.disk.rst
new file mode 100644
index 000000000..4a6c0f406
--- /dev/null
+++ b/doc/source/api/nova..virt.disk.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.disk` Module
+==============================================================================
+.. automodule:: nova..virt.disk
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst
new file mode 100644
index 000000000..06ecdbf7d
--- /dev/null
+++ b/doc/source/api/nova..virt.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.fake` Module
+==============================================================================
+.. automodule:: nova..virt.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.hyperv.rst b/doc/source/api/nova..virt.hyperv.rst
new file mode 100644
index 000000000..48d89378e
--- /dev/null
+++ b/doc/source/api/nova..virt.hyperv.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.hyperv` Module
+==============================================================================
+.. automodule:: nova..virt.hyperv
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst
new file mode 100644
index 000000000..4fdeb7af8
--- /dev/null
+++ b/doc/source/api/nova..virt.images.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.images` Module
+==============================================================================
+.. automodule:: nova..virt.images
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst
new file mode 100644
index 000000000..7fb8aed5f
--- /dev/null
+++ b/doc/source/api/nova..virt.libvirt_conn.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.libvirt_conn` Module
+==============================================================================
+.. automodule:: nova..virt.libvirt_conn
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.fake.rst b/doc/source/api/nova..virt.xenapi.fake.rst
new file mode 100644
index 000000000..752dabb14
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.fake.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.fake` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.fake
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.network_utils.rst b/doc/source/api/nova..virt.xenapi.network_utils.rst
new file mode 100644
index 000000000..15f52973e
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.network_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.network_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.network_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vm_utils.rst b/doc/source/api/nova..virt.xenapi.vm_utils.rst
new file mode 100644
index 000000000..18745dc71
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.vm_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.vm_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.vm_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.vmops.rst b/doc/source/api/nova..virt.xenapi.vmops.rst
new file mode 100644
index 000000000..30662c58d
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.vmops.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.vmops` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.vmops
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volume_utils.rst b/doc/source/api/nova..virt.xenapi.volume_utils.rst
new file mode 100644
index 000000000..413e4dc4b
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.volume_utils.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.volume_utils` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.volume_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.volumeops.rst b/doc/source/api/nova..virt.xenapi.volumeops.rst
new file mode 100644
index 000000000..626f164df
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi.volumeops.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi.volumeops` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi.volumeops
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi_conn.rst b/doc/source/api/nova..virt.xenapi_conn.rst
new file mode 100644
index 000000000..14ac5147f
--- /dev/null
+++ b/doc/source/api/nova..virt.xenapi_conn.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..virt.xenapi_conn` Module
+==============================================================================
+.. automodule:: nova..virt.xenapi_conn
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.api.rst b/doc/source/api/nova..volume.api.rst
new file mode 100644
index 000000000..8ad36e049
--- /dev/null
+++ b/doc/source/api/nova..volume.api.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.api` Module
+==============================================================================
+.. automodule:: nova..volume.api
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst
new file mode 100644
index 000000000..51f5c0729
--- /dev/null
+++ b/doc/source/api/nova..volume.driver.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.driver` Module
+==============================================================================
+.. automodule:: nova..volume.driver
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst
new file mode 100644
index 000000000..91a192a8f
--- /dev/null
+++ b/doc/source/api/nova..volume.manager.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.manager` Module
+==============================================================================
+.. automodule:: nova..volume.manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..volume.san.rst b/doc/source/api/nova..volume.san.rst
new file mode 100644
index 000000000..1de068928
--- /dev/null
+++ b/doc/source/api/nova..volume.san.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..volume.san` Module
+==============================================================================
+.. automodule:: nova..volume.san
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst
new file mode 100644
index 000000000..0bff1c332
--- /dev/null
+++ b/doc/source/api/nova..wsgi.rst
@@ -0,0 +1,6 @@
+The :mod:`nova..wsgi` Module
+==============================================================================
+.. automodule:: nova..wsgi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/source/community.rst b/doc/source/community.rst
index 4ae32f1eb..e925a47bd 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -18,7 +18,7 @@
Getting Involved
================
-The Nova community is a very friendly group and there are places online to join in with the
+The OpenStack community for Nova is a very friendly group and there are places online to join in with the
community. Feel free to ask questions. This document points you to some of the places where you can
communicate with people.
@@ -83,3 +83,13 @@ Twitter
Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_. Also follow the
`#openstack <http://search.twitter.com/search?q=%23openstack>`_ tag for relevant tweets.
+
+OpenStack Docs Site
+-------------------
+
+The `nova.openstack.org <http://nova.openstack.org>`_ site is geared towards developer documentation,
+and the `docs.openstack.org <http://docs.openstack.org>`_ site is intended for cloud administrators
+who are standing up and running OpenStack Compute in production. You can contribute to the Docs Site
+using bzr and Launchpad to work on the openstack-manuals project at http://launchpad.net/openstack-manuals.
+
+
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d337fb69f..846d3cfcd 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -32,11 +32,13 @@ Nova is written with the following design guidelines in mind:
* **API Compatibility**: Nova strives to provide API-compatible with popular systems like Amazon EC2
This documentation is generated by the Sphinx toolkit and lives in the source
-tree. Additional documentation on Nova and other components of OpenStack can
-be found on the `OpenStack wiki`_. Also see the :doc:`community` page for
-other ways to interact with the community.
+tree. Additional draft and project documentation on Nova and other components of OpenStack can
+be found on the `OpenStack wiki`_. Cloud administrators should refer to `docs.openstack.org`_.
+
+Also see the :doc:`community` page for other ways to interact with the community.
.. _`OpenStack wiki`: http://wiki.openstack.org
+.. _`docs.openstack.org`: http://docs.openstack.org
Key Concepts
@@ -50,17 +52,7 @@ Key Concepts
service.architecture
nova.object.model
swift.object.model
-
-Administrator's Documentation
-=============================
-
-.. toctree::
- :maxdepth: 1
-
- livecd
- adminguide/index
- adminguide/single.node.install
- adminguide/multi.node.install
+ runnova/index
Developer Docs
==============
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index bb9d7a7fe..17ba91bef 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -179,6 +179,42 @@ Nova Floating IPs
Displays a list of all floating IP addresses.
+Nova Flavor
+~~~~~~~~~~~
+
+``nova-manage flavor list``
+
+ Outputs a list of all active flavors to the screen.
+
+``nova-manage flavor list --all``
+
+ Outputs a list of all flavors (active and inactive) to the screen.
+
+``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``
+
+ Creates a flavor named <name> with the following positional arguments:
+ * memory (expressed in megabytes)
+ * vcpu(s) (integer)
+ * local storage (expressed in gigabytes)
+ * flavorid (unique integer)
+ * swap space (expressed in megabytes, defaults to zero, optional)
+ * RXTX quotas (expressed in gigabytes, defaults to zero, optional)
+ * RXTX cap (expressed in gigabytes, defaults to zero, optional)
+
+``nova-manage flavor delete <name>``
+
+ Deletes the flavor with the name <name>. This marks the flavor as inactive so it can no longer be used to launch new instances; however, the record stays in the database for archival and billing purposes.
+
+``nova-manage flavor delete <name> --purge``
+
+ Purges the flavor with the name <name>. This permanently removes the flavor from the database.
+
+
+Nova Instance_type
+~~~~~~~~~~~~~~~~~~
+
+The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.
+
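+For example, listing the currently active flavors and then purging an unused
+one might look like the following (the output shown is illustrative)::
+
+ # nova-manage flavor list
+ m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ # nova-manage flavor delete m1.xxlarge --purge
+ m1.xxlarge purged
+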
FILES
========
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index e9687dc98..45cc4b879 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -64,6 +64,11 @@ Concept: Instances
An 'instance' is a word for a virtual machine that runs inside the cloud.
+Concept: Instance Type
+----------------------
+
+An 'instance type' describes the compute, memory and storage capacity of nova computing instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching.
+
Concept: System Architecture
----------------------------
diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst
index d02f151fd..419e89b0c 100644
--- a/doc/source/object.model.rst
+++ b/doc/source/object.model.rst
@@ -18,8 +18,6 @@
Object Model
============
-.. todo:: Add brief description for core models
-
.. graphviz::
digraph foo {
@@ -42,27 +40,27 @@ Object Model
Users
-----
-Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
+Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/runnova/managing.users`.
Projects
--------
-For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
+For Nova, access to images is based on the project. Read more at :doc:`/runnova/managing.projects`.
Images
------
-Images are binary files that run the operating system. Read more at :doc:`/adminguide/managing.images`.
+Images are binary files that run the operating system. Read more at :doc:`/runnova/managing.images`.
Instances
---------
-Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
+Instances are running virtual servers. Read more at :doc:`/runnova/managing.instances`.
Volumes
-------
-.. todo:: Write doc about volumes
+Volumes offer extra block-level storage to instances. Read more at `Managing Volumes <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_.
Security Groups
---------------
@@ -72,7 +70,7 @@ In Nova, a security group is a named collection of network access rules, like fi
VLANs
-----
-VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
+VLAN is the default network mode for Nova. Read more at :doc:`/runnova/network.vlan`.
IP Addresses
------------
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index 17c9e10a8..84ed3fe01 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -54,7 +54,7 @@ Environment Variables
By tweaking the environment that nova.sh run in, you can build slightly
different configurations (though for more complex setups you should see
-:doc:`/adminguide/getting.started` and :doc:`/adminguide/multi.node.install`).
+`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_).
* HOST_IP
* Default: address of first interface from the ifconfig command
diff --git a/doc/source/adminguide/binaries.rst b/doc/source/runnova/binaries.rst
index 5c50a51f1..023831021 100644
--- a/doc/source/adminguide/binaries.rst
+++ b/doc/source/runnova/binaries.rst
@@ -35,12 +35,12 @@ Nova api receives xml requests and sends them to the rest of the system. It is
nova-objectstore
----------------
-Nova objectstore is an ultra simple file-based storage system for images that replicates most of the S3 Api. It will soon be replaced with glance and a simple image manager.
+Nova objectstore is an ultra-simple file-based storage system for images that replicates most of the S3 API. It will soon be replaced with Glance (http://glance.openstack.org) and a simple image manager.
nova-compute
------------
-Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc.
+Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc.
nova-volume
-----------
diff --git a/doc/source/adminguide/euca2ools.rst b/doc/source/runnova/euca2ools.rst
index 6f0c57358..6f0c57358 100644
--- a/doc/source/adminguide/euca2ools.rst
+++ b/doc/source/runnova/euca2ools.rst
diff --git a/doc/source/runnova/flags.rst b/doc/source/runnova/flags.rst
new file mode 100644
index 000000000..1bfa022d9
--- /dev/null
+++ b/doc/source/runnova/flags.rst
@@ -0,0 +1,193 @@
+..
+ Copyright 2010-2011 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Flags and Flagfiles
+===================
+
+Nova uses a configuration file containing flags located in /etc/nova/nova.conf. You can get the most recent listing of available flags by running nova-(servicename) --help, for example, nova-api --help.
+
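+A nova.conf file is simply a list of flag settings, one per line. The minimal
+sketch below is illustrative only; every flag it sets appears in the list that
+follows, and the values should be adapted to your deployment::
+
+ --verbose
+ --network_manager=nova.network.manager.VlanManager
+ --sql_connection=sqlite:///$state_path/nova.sqlite
+ --rabbit_host=localhost
+ --glance_port=9292
+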
+Here's a list of available flags and their default settings.
+
+ --ajax_console_proxy_port: port that ajax_console_proxy binds
+ (default: '8000')
+ --ajax_console_proxy_topic: the topic ajax proxy nodes listen on
+ (default: 'ajax_proxy')
+ --ajax_console_proxy_url: location of ajax console proxy, in the form
+ "http://127.0.0.1:8000"
+ (default: 'http://127.0.0.1:8000')
+ --auth_token_ttl: Seconds for auth tokens to linger
+ (default: '3600')
+ (an integer)
+ --aws_access_key_id: AWS Access ID
+ (default: 'admin')
+ --aws_secret_access_key: AWS Access Key
+ (default: 'admin')
+ --compute_manager: Manager for compute
+ (default: 'nova.compute.manager.ComputeManager')
+ --compute_topic: the topic compute nodes listen on
+ (default: 'compute')
+ --connection_type: libvirt, xenapi or fake
+ (default: 'libvirt')
+ --console_manager: Manager for console proxy
+ (default: 'nova.console.manager.ConsoleProxyManager')
+ --console_topic: the topic console proxy nodes listen on
+ (default: 'console')
+ --control_exchange: the main exchange to connect to
+ (default: 'nova')
+ --db_backend: The backend to use for db
+ (default: 'sqlalchemy')
+ --default_image: default image to use, testing only
+ (default: 'ami-11111')
+ --default_instance_type: default instance type to use, testing only
+ (default: 'm1.small')
+ --default_log_levels: list of logger=LEVEL pairs
+ (default: 'amqplib=WARN,sqlalchemy=WARN,eventlet.wsgi.server=WARN')
+ (a comma separated list)
+ --default_project: default project for openstack
+ (default: 'openstack')
+ --ec2_dmz_host: internal ip of api server
+ (default: '$my_ip')
+ --ec2_host: ip of api server
+ (default: '$my_ip')
+ --ec2_path: suffix for ec2
+ (default: '/services/Cloud')
+ --ec2_port: cloud controller port
+ (default: '8773')
+ (an integer)
+ --ec2_scheme: prefix for ec2
+ (default: 'http')
+ --[no]enable_new_services: Services to be added to the available pool on
+ create
+ (default: 'true')
+ --[no]fake_network: should we use fake network devices and addresses
+ (default: 'false')
+ --[no]fake_rabbit: use a fake rabbit
+ (default: 'false')
+ --glance_host: glance host
+ (default: '$my_ip')
+ --glance_port: glance port
+ (default: '9292')
+ (an integer)
+ -?,--[no]help: show this help
+ --[no]helpshort: show usage only for this module
+ --[no]helpxml: like --help, but generates XML output
+ --host: name of this node
+ (default: 'osdemo03')
+ --image_service: The service to use for retrieving and searching for images.
+ (default: 'nova.image.s3.S3ImageService')
+ --instance_name_template: Template string to be used to generate instance
+ names
+ (default: 'instance-%08x')
+ --logfile: output to named file
+ --logging_context_format_string: format string to use for log messages with
+ context
+ (default: '%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s
+ %(project)s] %(message)s')
+ --logging_debug_format_suffix: data to append to log format when level is
+ DEBUG
+ (default: 'from %(processName)s (pid=%(process)d) %(funcName)s
+ %(pathname)s:%(lineno)d')
+ --logging_default_format_string: format string to use for log messages without
+ context
+ (default: '%(asctime)s %(levelname)s %(name)s [-] %(message)s')
+ --logging_exception_prefix: prefix each line of exception output with this
+ format
+ (default: '(%(name)s): TRACE: ')
+ --my_ip: host ip address
+ (default: '184.106.73.68')
+ --network_manager: Manager for network
+ (default: 'nova.network.manager.VlanManager')
+ --network_topic: the topic network nodes listen on
+ (default: 'network')
+ --node_availability_zone: availability zone of this node
+ (default: 'nova')
+ --null_kernel: kernel image that indicates not to use a kernel, but to use a
+ raw disk image instead
+ (default: 'nokernel')
+ --osapi_host: ip of api server
+ (default: '$my_ip')
+ --osapi_path: suffix for openstack
+ (default: '/v1.0/')
+ --osapi_port: OpenStack API port
+ (default: '8774')
+ (an integer)
+ --osapi_scheme: prefix for openstack
+ (default: 'http')
+ --periodic_interval: seconds between running periodic tasks
+ (default: '60')
+ (a positive integer)
+ --pidfile: pidfile to use for this service
+ --rabbit_host: rabbit host
+ (default: 'localhost')
+ --rabbit_max_retries: rabbit connection attempts
+ (default: '12')
+ (an integer)
+ --rabbit_password: rabbit password
+ (default: 'guest')
+ --rabbit_port: rabbit port
+ (default: '5672')
+ (an integer)
+ --rabbit_retry_interval: rabbit connection retry interval
+ (default: '10')
+ (an integer)
+ --rabbit_userid: rabbit userid
+ (default: 'guest')
+ --rabbit_virtual_host: rabbit virtual host
+ (default: '/')
+ --region_list: list of region=fqdn pairs separated by commas
+ (default: '')
+ (a comma separated list)
+ --report_interval: seconds between nodes reporting state to datastore
+ (default: '10')
+ (a positive integer)
+ --s3_dmz: s3 dmz ip (for instances)
+ (default: '$my_ip')
+ --s3_host: s3 host (for infrastructure)
+ (default: '$my_ip')
+ --s3_port: s3 port
+ (default: '3333')
+ (an integer)
+ --scheduler_manager: Manager for scheduler
+ (default: 'nova.scheduler.manager.SchedulerManager')
+ --scheduler_topic: the topic scheduler nodes listen on
+ (default: 'scheduler')
+ --sql_connection: connection string for sql database
+ (default: 'sqlite:///$state_path/nova.sqlite')
+ --sql_idle_timeout: timeout for idle sql database connections
+ (default: '3600')
+ --sql_max_retries: sql connection attempts
+ (default: '12')
+ (an integer)
+ --sql_retry_interval: sql connection retry interval
+ (default: '10')
+ (an integer)
+ --state_path: Top-level directory for maintaining nova's state
+ (default: '/usr/lib/pymodules/python2.6/nova/../')
+ --[no]use_syslog: output to syslog
+ (default: 'false')
+ --[no]verbose: show debug output
+ (default: 'false')
+ --volume_manager: Manager for volume
+ (default: 'nova.volume.manager.VolumeManager')
+ --volume_name_template: Template string to be used to generate instance names
+ (default: 'volume-%08x')
+ --volume_topic: the topic volume nodes listen on
+ (default: 'volume')
+ --vpn_image_id: AMI for cloudpipe vpn server
+ (default: 'ami-cloudpipe')
+ --vpn_key_suffix: Suffix to add to project name for vpn key and secgroups
+ (default: '-vpn') \ No newline at end of file
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/runnova/getting.started.rst
index 675d8e664..4cc7307b0 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/runnova/getting.started.rst
@@ -105,11 +105,10 @@ Configuration
Configuring the host system
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-As you read through the Administration Guide you will notice configuration hints
-inline with documentation on the subsystem you are configuring. Presented in
-this "Getting Started with Nova" document, we only provide what you need to
-get started as quickly as possible. For a more detailed description of system
-configuration, start reading through :doc:`multi.node.install`.
+Nova can be configured in many different ways. In this "Getting Started with Nova" document, we only provide what you need to get started as quickly as possible. For a more detailed description of system
+configuration, start reading through `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
+
+`Detailed instructions for creating a volume group are available <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_, or use these quick instructions.
* Create a volume group (you can use an actual disk for the volume group as
well)::
@@ -136,6 +135,8 @@ flagfile, so typically a file like ``nova-manage.conf`` would have as its first
line ``--flagfile=/etc/nova/nova.conf`` to load the common flags before
specifying overrides or additional options.
+To get a current comprehensive list of flag file options, run bin/nova-<servicename> --help, or refer to a static list at `Reference for Flags in nova.conf <http://docs.openstack.org/openstack-compute/admin/content/ch05s08.html>`_.
+
A sample configuration to test the system follows::
--verbose
@@ -143,13 +144,13 @@ A sample configuration to test the system follows::
--auth_driver=nova.auth.dbdriver.DbDriver
Running
----------
+-------
There are many parts to the nova system, each with a specific function. They
are built to be highly-available, so there are may configurations they can be
run in (ie: on many machines, many listeners per machine, etc). This part
of the guide only gets you started quickly, to learn about HA options, see
-:doc:`multi.node.install`.
+`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
Launch supporting services
diff --git a/doc/source/adminguide/index.rst b/doc/source/runnova/index.rst
index 3bd72cfdc..283d268ce 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/runnova/index.rst
@@ -15,17 +15,17 @@
License for the specific language governing permissions and limitations
under the License.
-Administration Guide
-====================
+Running Nova
+============
-This guide describes the basics of running and managing Nova.
+This guide describes the basics of running and managing Nova. For additional administrator documentation, refer to `docs.openstack.org <http://docs.openstack.org>`_.
Running the Cloud
-----------------
-The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`.
+The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`. It relies on the nova.sh script to run Nova on a single machine.
-Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s). Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`.
+Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s). Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`.
Administration Utilities
------------------------
@@ -60,12 +60,12 @@ For background on the core objects referenced in this section, see :doc:`../obje
Deployment
----------
-For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq).
+For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). For instructions on multi-server installations, refer to `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_.
+
.. toctree::
:maxdepth: 1
- multi.node.install
dbsync
@@ -75,7 +75,6 @@ Networking
.. toctree::
:maxdepth: 1
- multi.node.install
network.vlan.rst
network.flat.rst
diff --git a/doc/source/adminguide/managing.images.rst b/doc/source/runnova/managing.images.rst
index c5d93a6e8..c5d93a6e8 100644
--- a/doc/source/adminguide/managing.images.rst
+++ b/doc/source/runnova/managing.images.rst
diff --git a/doc/source/runnova/managing.instance.types.rst b/doc/source/runnova/managing.instance.types.rst
new file mode 100644
index 000000000..746077716
--- /dev/null
+++ b/doc/source/runnova/managing.instance.types.rst
@@ -0,0 +1,84 @@
+..
+ Copyright 2011 Ken Pepple
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Managing Instance Types and Flavors
+===================================
+
+What are Instance Types or Flavors ?
+------------------------------------
+
+Instance types describe the compute, memory and storage capacity of nova computing instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server".
+
+In Nova, "flavor" and "instance type" are equivalent terms. When you create an EC2 instance type, you are also creating a OpenStack API flavor. To reduce repetition, for the rest of this document I will refer to these as instance types.
+
+Instance types can be in either the active or inactive state:
+ * Active instance types are available to be used for launching instances
+ * Inactive instance types are not available for launching instances
+
+In the current (Cactus) version of nova, instance types can only be created by the nova administrator through the nova-manage command. Future versions of nova (in concert with the OpenStack API or EC2 API) may expose this functionality directly to users.
+
+Basic Management
+----------------
+
+Instance types / flavors are managed through the nova-manage binary with
+the "instance_type" command and an appropriate subcommand. Note that you can also use
+the "flavor" command as a synonym for "instance_type".
+
+To see all currently active instance types, use the list subcommand::
+
+ # nova-manage instance_type list
+ m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+
+By default, the list subcommand only shows active instance types. To see all instance types (inactive and active), use the list subcommand with the "--all" flag::
+
+ # nova-manage instance_type list --all
+ m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
+ m1.deleted: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB, inactive
+
+To create an instance type, use the "create" subcommand with the following positional arguments:
+ * name (a unique name, such as "m1.xxlarge")
+ * memory (expressed in megabytes)
+ * vcpu(s) (integer)
+ * local storage (expressed in gigabytes)
+ * flavorid (unique integer)
+ * swap space (expressed in megabytes, defaults to zero, optional)
+ * RXTX quota (expressed in gigabytes, defaults to zero, optional)
+ * RXTX cap (expressed in megabytes, defaults to zero, optional)
+
+The following example creates an instance type named "m1.xxlarge"::
+
+ # nova-manage instance_type create m1.xxlarge 32768 16 320 0 0 0
+ m1.xxlarge created
+
+To delete an instance type, use the "delete" subcommand and specify the name::
+
+ # nova-manage instance_type delete m1.xxlarge
+ m1.xxlarge deleted
+
+Please note that the "delete" command only marks the instance type as
+inactive in the database; it does not actually remove the instance type. This is done
+to preserve the instance type definition for long running instances (which may not
+terminate for months or years). If you are sure that you want to delete this instance
+type from the database, pass the "--purge" flag after the name::
+
+ # nova-manage instance_type delete m1.xxlarge --purge
+ m1.xxlarge purged
diff --git a/doc/source/adminguide/managing.instances.rst b/doc/source/runnova/managing.instances.rst
index e62352017..e62352017 100644
--- a/doc/source/adminguide/managing.instances.rst
+++ b/doc/source/runnova/managing.instances.rst
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/runnova/managing.networks.rst
index 9eea46d70..9eea46d70 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/runnova/managing.networks.rst
diff --git a/doc/source/adminguide/managing.projects.rst b/doc/source/runnova/managing.projects.rst
index 5dd7f2de9..5dd7f2de9 100644
--- a/doc/source/adminguide/managing.projects.rst
+++ b/doc/source/runnova/managing.projects.rst
diff --git a/doc/source/adminguide/managing.users.rst b/doc/source/runnova/managing.users.rst
index 392142e86..392142e86 100644
--- a/doc/source/adminguide/managing.users.rst
+++ b/doc/source/runnova/managing.users.rst
diff --git a/doc/source/adminguide/managingsecurity.rst b/doc/source/runnova/managingsecurity.rst
index 7893925e7..7893925e7 100644
--- a/doc/source/adminguide/managingsecurity.rst
+++ b/doc/source/runnova/managingsecurity.rst
diff --git a/doc/source/adminguide/monitoring.rst b/doc/source/runnova/monitoring.rst
index 2c93c71b5..2c93c71b5 100644
--- a/doc/source/adminguide/monitoring.rst
+++ b/doc/source/runnova/monitoring.rst
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/runnova/network.flat.rst
index 3d8680c6f..3d8680c6f 100644
--- a/doc/source/adminguide/network.flat.rst
+++ b/doc/source/runnova/network.flat.rst
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/runnova/network.vlan.rst
index c06ce8e8b..c06ce8e8b 100644
--- a/doc/source/adminguide/network.vlan.rst
+++ b/doc/source/runnova/network.vlan.rst
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/runnova/nova.manage.rst
index 0e9a29b6b..0e9a29b6b 100644
--- a/doc/source/adminguide/nova.manage.rst
+++ b/doc/source/runnova/nova.manage.rst
diff --git a/nova/__init__.py b/nova/__init__.py
index 8745617bc..256db55a9 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -30,5 +30,3 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
-
-from exception import *
diff --git a/nova/adminclient.py b/nova/adminclient.py
index c614b274c..fc3c5c5fe 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -23,6 +23,8 @@ import base64
import boto
import boto.exception
import httplib
+import re
+import string
from boto.ec2.regioninfo import RegionInfo
@@ -165,19 +167,20 @@ class HostInfo(object):
**Fields Include**
- * Disk stats
- * Running Instances
- * Memory stats
- * CPU stats
- * Network address info
- * Firewall info
- * Bridge and devices
-
+ * Hostname
+ * Compute service status
+ * Volume service status
+ * Instance count
+ * Volume count
"""
def __init__(self, connection=None):
self.connection = connection
self.hostname = None
+ self.compute = None
+ self.volume = None
+ self.instance_count = 0
+ self.volume_count = 0
def __repr__(self):
return 'Host:%s' % self.hostname
@@ -188,7 +191,39 @@ class HostInfo(object):
# this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
- setattr(self, name, value)
+ fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
+ setattr(self, fixed_name, value)
+
+
+class Vpn(object):
+ """
+ Information about a Vpn, as parsed through SAX
+
+ **Fields Include**
+
+ * instance_id
+ * project_id
+ * public_ip
+ * public_port
+ * created_at
+ * internal_ip
+ * state
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.instance_id = None
+ self.project_id = None
+
+ def __repr__(self):
+ return 'Vpn:%s:%s' % (self.project_id, self.instance_id)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name))
+ setattr(self, fixed_name, value)
class InstanceType(object):
@@ -422,6 +457,16 @@ class NovaAdminClient(object):
zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo)
return zip.file
+ def start_vpn(self, project):
+ """
+ Starts the vpn for a user
+ """
+ return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn)
+
+ def get_vpns(self):
+ """Return a list of vpn with project name"""
+ return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)])
+
def get_hosts(self):
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
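A rough usage sketch for the new VPN helpers on NovaAdminClient; `admin` is assumed to be an already-configured nova.adminclient.NovaAdminClient instance, and the project name and printed fields are only illustrative:

    def show_vpns(admin, project='myproject'):
        """admin: an existing nova.adminclient.NovaAdminClient instance."""
        started = admin.start_vpn(project)        # launches the project's cloudpipe VPN
        print 'started vpn instance:', started.instance_id
        for vpn in admin.get_vpns():              # one Vpn entry per project
            print vpn.project_id, vpn.instance_id, vpn.public_ip, vpn.state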
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 735951082..d9a4ef999 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,14 +21,17 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
+import datetime
from nova import db
from nova import exception
+from nova import flags
from nova import log as logging
+from nova import utils
from nova.auth import manager
-from nova.compute import instance_types
+FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.ec2.admin')
@@ -55,22 +58,54 @@ def project_dict(project):
return {}
-def host_dict(host):
+def host_dict(host, compute_service, instances, volume_service, volumes, now):
"""Convert a host model object to a result dict"""
- if host:
- return host.state
- else:
- return {}
+ rv = {'hostname': host, 'instance_count': len(instances),
+ 'volume_count': len(volumes)}
+ if compute_service:
+ latest = compute_service['updated_at'] or compute_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['compute'] = 'up'
+ else:
+ rv['compute'] = 'down'
+ if volume_service:
+ latest = volume_service['updated_at'] or volume_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['volume'] = 'up'
+ else:
+ rv['volume'] = 'down'
+ return rv
-def instance_dict(name, inst):
- return {'name': name,
+def instance_dict(inst):
+ return {'name': inst['name'],
'memory_mb': inst['memory_mb'],
'vcpus': inst['vcpus'],
'disk_gb': inst['local_gb'],
'flavor_id': inst['flavorid']}
+def vpn_dict(project, vpn_instance):
+ rv = {'project_id': project.id,
+ 'public_ip': project.vpn_ip,
+ 'public_port': project.vpn_port}
+ if vpn_instance:
+ rv['instance_id'] = vpn_instance['ec2_id']
+ rv['created_at'] = utils.isotime(vpn_instance['created_at'])
+ address = vpn_instance.get('fixed_ip', None)
+ if address:
+ rv['internal_ip'] = address['address']
+ if utils.vpn_ping(project.vpn_ip, project.vpn_port):
+ rv['state'] = 'running'
+ else:
+ rv['state'] = 'down'
+ else:
+ rv['state'] = 'pending'
+ return rv
+
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -79,9 +114,9 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
- def describe_instance_types(self, _context, **_kwargs):
- return {'instanceTypeSet': [instance_dict(n, v) for n, v in
- instance_types.INSTANCE_TYPES.iteritems()]}
+ def describe_instance_types(self, context, **_kwargs):
+ """Returns all active instance types data (vcpus, memory, etc.)"""
+ return {'instanceTypeSet': [db.instance_type_get_all(context)]}
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
@@ -223,19 +258,68 @@ class AdminController(object):
raise exception.ApiError(_('operation must be add or remove'))
return True
+ def _vpn_for(self, context, project_id):
+ """Get the VPN instance for a project ID."""
+ for instance in db.instance_get_all_by_project(context, project_id):
+ if (instance['image_id'] == FLAGS.vpn_image_id
+ and not instance['state_description'] in
+ ['shutting_down', 'shutdown']):
+ return instance
+
+ def start_vpn(self, context, project):
+ instance = self._vpn_for(context, project)
+ if not instance:
+ # NOTE(vish) import delayed because of __init__.py
+ from nova.cloudpipe import pipelib
+ pipe = pipelib.CloudPipe()
+ try:
+ pipe.launch_vpn_instance(project)
+ except db.NoMoreNetworks:
+ raise exception.ApiError("Unable to claim IP for VPN instance"
+ ", ensure it isn't running, and try "
+ "again in a few minutes")
+ instance = self._vpn_for(context, project)
+ return {'instance_id': instance['ec2_id']}
+
+ def describe_vpns(self, context):
+ vpns = []
+ for project in manager.AuthManager().get_projects():
+ instance = self._vpn_for(context, project.id)
+ vpns.append(vpn_dict(project, instance))
+ return {'items': vpns}
+
# FIXME(vish): these host commands don't work yet, perhaps some of the
# required data can be retrieved from service objects?
- def describe_hosts(self, _context, **_kwargs):
+ def describe_hosts(self, context, **_kwargs):
"""Returns status info for all nodes. Includes:
- * Disk Space
- * Instance List
- * RAM used
- * CPU used
- * DHCP servers running
- * Iptables / bridges
+ * Hostname
+ * Compute (up, down, None)
+ * Instance count
+ * Volume (up, down, None)
+ * Volume Count
"""
- return {'hostSet': [host_dict(h) for h in db.host_get_all()]}
+ services = db.service_get_all(context)
+ now = datetime.datetime.utcnow()
+ hosts = []
+ rv = []
+ for host in [service['host'] for service in services]:
+ if not host in hosts:
+ hosts.append(host)
+ for host in hosts:
+ compute = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-compute']
+ if compute:
+ compute = compute[0]
+ instances = db.instance_get_all_by_host(context, host)
+ volume = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-volume']
+ if volume:
+ volume = volume[0]
+ volumes = db.volume_get_all_by_host(context, host)
+ rv.append(host_dict(host, compute, instances, volume, volumes,
+ now))
+ return {'hosts': rv}
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 2b1acba5a..d7ad08d2f 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -52,7 +52,23 @@ def _database_to_isoformat(datetimeobj):
def _try_convert(value):
- """Return a non-string if possible"""
+ """Return a non-string from a string or unicode, if possible.
+
+ ============= =====================================================
+ When value is returns
+ ============= =====================================================
+ zero-length ''
+ 'None' None
+ 'True' True
+ 'False' False
+ '0', '-0' 0
+ 0xN, -0xN int from hex (positive) (N is any number)
+ 0bN, -0bN int from binary (positive) (N is any number)
+ * try conversion to int, float, complex, fallback value
+ ============= =====================================================
+
+ """
+ if len(value) == 0:
+ return ''
if value == 'None':
return None
if value == 'True':
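A few concrete cases of the behaviour documented above, written as assertions; this is only a sketch, and _try_convert is a module-private helper of nova.api.ec2.apirequest:

    from nova.api.ec2.apirequest import _try_convert

    assert _try_convert('') == ''
    assert _try_convert('None') is None
    assert _try_convert('True') is True
    assert _try_convert('False') is False
    assert _try_convert('0') == 0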
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 882cdcfc9..0d22a3f46 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -198,8 +198,9 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
- enabled_services = db.service_get_all(context)
- disabled_services = db.service_get_all(context, True)
+ ctxt = context.elevated()
+ enabled_services = db.service_get_all(ctxt)
+ disabled_services = db.service_get_all(ctxt, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
@@ -297,7 +298,7 @@ class CloudController(object):
'keyFingerprint': key_pair['fingerprint'],
})
- return {'keypairsSet': result}
+ return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
@@ -318,14 +319,19 @@ class CloudController(object):
def describe_security_groups(self, context, group_name=None, **kwargs):
self.compute_api.ensure_default_security_group(context)
- if context.is_admin:
+ if group_name:
+ groups = []
+ for name in group_name:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ name)
+ groups.append(group)
+ elif context.is_admin:
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project_id)
groups = [self._format_security_group(context, g) for g in groups]
- if not group_name is None:
- groups = [g for g in groups if g.name in group_name]
return {'securityGroupInfo':
list(sorted(groups,
@@ -529,8 +535,9 @@ class CloudController(object):
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
- internal_id = ec2_id_to_id(ec2_id)
- return self.compute_api.get_ajax_console(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context,
+ instance_id=instance_id)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
@@ -669,7 +676,8 @@ class CloudController(object):
instances = []
for ec2_id in instance_id:
internal_id = ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, internal_id)
+ instance = self.compute_api.get(context,
+ instance_id=internal_id)
instances.append(instance)
else:
instances = self.compute_api.get_all(context, **kwargs)
@@ -830,14 +838,14 @@ class CloudController(object):
self.compute_api.unrescue(context, instance_id=instance_id)
return True
- def update_instance(self, context, ec2_id, **kwargs):
+ def update_instance(self, context, instance_id, **kwargs):
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
@@ -882,7 +890,6 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
- image = self.image_service.show(context, image_id)
image = self._format_image(context,
self.image_service.show(context,
image_id))
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index d0b18eced..274330e3b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -74,13 +74,17 @@ class APIRouter(wsgi.Router):
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
LOG.debug(_("Including admin operations in API."))
+
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['rescue'] = 'POST'
+ server_members['unrescue'] = 'POST'
server_members['reset_network'] = 'POST'
+ server_members['inject_network_info'] = 'POST'
mapper.resource("zone", "zones", controller=zones.Controller(),
collection={'detail': 'GET'})
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index c3fe0cc8c..6011e6115 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -26,6 +26,7 @@ import webob.dec
from nova import auth
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import manager
from nova import utils
@@ -103,11 +104,14 @@ class AuthMiddleware(wsgi.Middleware):
2 days ago.
"""
ctxt = context.get_admin_context()
- token = self.db.auth_get_token(ctxt, token_hash)
+ try:
+ token = self.db.auth_token_get(ctxt, token_hash)
+ except exception.NotFound:
+ return None
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
- self.db.auth_destroy_token(ctxt, token)
+ self.db.auth_token_destroy(ctxt, token.token_hash)
else:
return self.auth.get_user(token.user_id)
return None
@@ -120,8 +124,8 @@ class AuthMiddleware(wsgi.Middleware):
req - webob.Request object
"""
ctxt = context.get_admin_context()
- user = self.auth.get_user_from_access_key(username)
- if user and user.secret == key:
+ user = self.auth.get_user_from_access_key(key)
+ if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
time.time())).hexdigest()
token_dict = {}
@@ -131,6 +135,6 @@ class AuthMiddleware(wsgi.Middleware):
token_dict['server_management_url'] = req.url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
- token = self.db.auth_create_token(ctxt, token_dict)
+ token = self.db.auth_token_create(ctxt, token_dict)
return token, user
return None, None
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 1dc3767e2..9f85c5c8a 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import webob.exc
+
from nova import exception
@@ -27,7 +29,8 @@ def limited(items, request, max_limit=1000):
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
- to max_limit.
+ to max_limit. Negative values for either offset or limit
+ will cause exc.HTTPBadRequest() exceptions to be raised.
@kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
@@ -40,6 +43,9 @@ def limited(items, request, max_limit=1000):
except ValueError:
limit = max_limit
+ if offset < 0 or limit < 0:
+ raise webob.exc.HTTPBadRequest()
+
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
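A short sketch of the new validation in limited(), using a request faked with webob (the path and query values are arbitrary):

    import webob
    import webob.exc

    from nova.api.openstack import common

    req = webob.Request.blank('/servers?offset=-1&limit=10')
    try:
        common.limited(range(100), req)
    except webob.exc.HTTPBadRequest:
        print 'negative offset or limit is now rejected'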
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index f620d4107..f3d040ba3 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -17,6 +17,8 @@
from webob import exc
+from nova import db
+from nova import context
from nova.api.openstack import faults
from nova.api.openstack import common
from nova.compute import instance_types
@@ -39,19 +41,19 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all flavors in detail."""
- items = [self.show(req, id)['flavor'] for id in self._all_ids()]
- items = common.limited(items, req)
+ items = [self.show(req, id)['flavor'] for id in self._all_ids(req)]
return dict(flavors=items)
def show(self, req, id):
"""Return data about the given flavor id."""
- for name, val in instance_types.INSTANCE_TYPES.iteritems():
- if val['flavorid'] == int(id):
- item = dict(ram=val['memory_mb'], disk=val['local_gb'],
- id=val['flavorid'], name=name)
- return dict(flavor=item)
+ ctxt = req.environ['nova.context']
+ values = db.instance_type_get_by_flavor_id(ctxt, id)
+ return dict(flavor=values)
raise faults.Fault(exc.HTTPNotFound())
- def _all_ids(self):
+ def _all_ids(self, req):
"""Return the list of all flavorids."""
- return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()]
+ ctxt = req.environ['nova.context']
+ inst_types = db.instance_type_get_all(ctxt)
+ flavor_ids = [inst_types[i]['flavorid'] for i in inst_types.keys()]
+ return sorted(flavor_ids)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 0bac4c64d..c2bf42b72 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import hashlib
import json
import traceback
@@ -50,7 +51,8 @@ def _translate_detail_keys(inst):
power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'}
+ power_state.CRASHED: 'error',
+ power_state.FAILED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
@@ -63,22 +65,22 @@ def _translate_detail_keys(inst):
inst_dict['addresses'] = dict(public=[], private=[])
# grab single private fixed ip
- try:
- private_ip = inst['fixed_ip']['address']
- if private_ip:
- inst_dict['addresses']['private'].append(private_ip)
- except KeyError:
- LOG.debug(_("Failed to read private ip"))
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ inst_dict['addresses']['private'] = private_ips
# grab all public floating ips
- try:
- for floating in inst['fixed_ip']['floating_ips']:
- inst_dict['addresses']['public'].append(floating['address'])
- except KeyError:
- LOG.debug(_("Failed to read public ip(s)"))
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ inst_dict['addresses']['public'] = public_ips
+
+ # Return the metadata as a dictionary
+ metadata = {}
+ for item in inst['metadata']:
+ metadata[item['key']] = item['value']
+ inst_dict['metadata'] = metadata
- inst_dict['metadata'] = {}
inst_dict['hostId'] = ''
+ if inst['host']:
+ inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
return dict(server=inst_dict)
@@ -137,42 +139,35 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """
- Machine images are associated with Kernels and Ramdisk images via
- metadata stored in Glance as 'image_properties'
- """
- def lookup(param):
- _image_id = image_id
- try:
- return image['properties'][param]
- except KeyError:
- raise exception.NotFound(
- _("%(param)s property not found for image %(_image_id)s") %
- locals())
-
- image_id = str(image_id)
- image = self._image_service.show(req.environ['nova.context'], image_id)
- return lookup('kernel_id'), lookup('ramdisk_id')
-
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- key_pairs = auth_manager.AuthManager.get_key_pairs(
- req.environ['nova.context'])
+ context = req.environ['nova.context']
+ key_pairs = auth_manager.AuthManager.get_key_pairs(context)
if not key_pairs:
raise exception.NotFound(_("No keypairs defined"))
key_pair = key_pairs[0]
image_id = common.get_image_id_from_image_hash(self._image_service,
- req.environ['nova.context'], env['server']['imageId'])
+ context, env['server']['imageId'])
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
+
+ # Metadata is a list, not a Dictionary, because we allow duplicate keys
+ # (even though JSON can't encode this)
+ # In future, we may not allow duplicate keys.
+ # However, the CloudServers API is not definitive on this front,
+ # and we want to be compatible.
+ metadata = []
+ if env['server'].get('metadata'):
+ for k, v in env['server']['metadata'].items():
+ metadata.append({'key': k, 'value': v})
+
instances = self.compute_api.create(
- req.environ['nova.context'],
+ context,
instance_types.get_by_flavor_id(env['server']['flavorId']),
image_id,
kernel_id=kernel_id,
@@ -181,6 +176,7 @@ class Controller(wsgi.Controller):
display_description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'],
+ metadata=metadata,
onset_files=env.get('onset_files', []))
return _translate_keys(instances[0])
@@ -207,10 +203,58 @@ class Controller(wsgi.Controller):
return exc.HTTPNoContent()
def action(self, req, id):
- """ Multi-purpose method used to reboot, rebuild, and
- resize a server """
+ """Multi-purpose method used to reboot, rebuild, or
+ resize a server"""
+
+ actions = {
+ 'reboot': self._action_reboot,
+ 'resize': self._action_resize,
+ 'confirmResize': self._action_confirm_resize,
+ 'revertResize': self._action_revert_resize,
+ 'rebuild': self._action_rebuild,
+ }
+
input_dict = self._deserialize(req.body, req)
- #TODO(sandy): rebuild/resize not supported.
+ for key in actions.keys():
+ if key in input_dict:
+ return actions[key](input_dict, req, id)
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def _action_confirm_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.confirm_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in confirm-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPNoContent()
+
+ def _action_revert_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.revert_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in revert-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
+ def _action_rebuild(self, input_dict, req, id):
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def _action_resize(self, input_dict, req, id):
+ """ Resizes a given instance to the flavor size requested """
+ try:
+ if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
+ flavor_id = input_dict['resize']['flavorId']
+ self.compute_api.resize(req.environ['nova.context'], id,
+ flavor_id)
+ else:
+ LOG.exception(_("Missing arguments for resize"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ except Exception, e:
+ LOG.exception(_("Error in resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return faults.Fault(exc.HTTPAccepted())
+
+ def _action_reboot(self, input_dict, req, id):
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -281,6 +325,20 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def inject_network_info(self, req, id):
+ """
+ Inject network info for an instance (admin only).
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.inject_network_info(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::inject_network_info %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@@ -325,6 +383,28 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def rescue(self, req, id):
+ """Permit users to rescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.rescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::rescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def unrescue(self, req, id):
+ """Permit users to unrescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.unrescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::unrescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def get_ajax_console(self, req, id):
""" Returns a url to an instance's ajaxterm console. """
try:
@@ -352,3 +432,37 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """Retrevies kernel and ramdisk IDs from Glance
+
+ Only 'machine' (ami) type use kernel and ramdisk outside of the
+ image.
+ """
+ # FIXME(sirp): Since we're retrieving the kernel_id from an
+ # image_property, this means only Glance is supported.
+ # The BaseImageService needs to expose a consistent way of accessing
+ # kernel_id and ramdisk_id
+ image = self._image_service.show(req.environ['nova.context'], image_id)
+
+ if image['status'] != 'active':
+ raise exception.Invalid(
+ _("Cannot build from image %(image_id)s, status not active") %
+ locals())
+
+ if image['type'] != 'machine':
+ return None, None
+
+ try:
+ kernel_id = image['properties']['kernel_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Kernel not found for image %(image_id)s") % locals())
+
+ try:
+ ramdisk_id = image['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Ramdisk not found for image %(image_id)s") % locals())
+
+ return kernel_id, ramdisk_id
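The deserialized request bodies recognized by the new action() dispatcher, written as the dicts it receives; the flavor id is illustrative, and for confirmResize and revertResize only the key is inspected, so the value is ignored:

    resize_body = {'resize': {'flavorId': 3}}
    confirm_body = {'confirmResize': None}
    revert_body = {'revertResize': None}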
diff --git a/nova/apiservice.py b/nova/apiservice.py
deleted file mode 100644
index 03aa781fb..000000000
--- a/nova/apiservice.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Wrapper for API service, makes it look more like the non-WSGI services"""
-
-from nova import flags
-from nova import log as logging
-from nova import version
-from nova import wsgi
-
-
-LOG = logging.getLogger('nova.api')
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('ec2_listen', "0.0.0.0",
- 'IP address for EC2 API to listen')
-flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
-flags.DEFINE_string('osapi_listen', "0.0.0.0",
- 'IP address for OpenStack API to listen')
-flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
-
-
-API_ENDPOINTS = ['ec2', 'osapi']
-
-
-def _run_app(paste_config_file):
- LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
- apps = []
- for api in API_ENDPOINTS:
- config = wsgi.load_paste_configuration(paste_config_file, api)
- if config is None:
- LOG.debug(_("No paste configuration for app: %s"), api)
- continue
- LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
- LOG.info(_("Running %s API"), api)
- app = wsgi.load_paste_app(paste_config_file, api)
- apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
- getattr(FLAGS, "%s_listen" % api)))
- if len(apps) == 0:
- LOG.error(_("No known API applications configured in %s."),
- paste_config_file)
- return
-
- server = wsgi.Server()
- for app in apps:
- server.start(*app)
- return server
-
-
-class ApiService(object):
- """Base class for workers that run on hosts."""
-
- def __init__(self, conf):
- self.conf = conf
- self.wsgi_app = None
-
- def start(self):
- self.wsgi_app = _run_app(self.conf)
-
- def wait(self):
- self.wsgi_app.wait()
-
- @classmethod
- def create(cls):
- conf = wsgi.paste_config_file('nova-api.conf')
- LOG.audit(_("Starting nova-api node (version %s)"),
- version.version_string_with_vcs())
- service = cls(conf)
- return service
-
-
-def serve(conf):
- LOG.audit(_("Starting nova-api node (version %s)"),
- version.version_string_with_vcs())
- LOG.debug(_("Full set of FLAGS:"))
- for flag in FLAGS:
- flag_get = FLAGS.get(flag, None)
- LOG.debug("%(flag)s : %(flag_get)s" % locals())
-
- service = ApiService(conf)
- service.start()
-
- return service
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 81ea6dc53..33d25fc4b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -85,12 +85,12 @@ class API(base.Base):
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
- availability_zone=None, user_data=None,
+ availability_zone=None, user_data=None, metadata=[],
onset_files=None):
"""Create the number of instances requested if quota and
- other arguments check out ok.
- """
- type_data = instance_types.INSTANCE_TYPES[instance_type]
+ other arguments check out ok."""
+
+ type_data = instance_types.get_instance_type(instance_type)
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
pid = context.project_id
@@ -100,11 +100,36 @@ class API(base.Base):
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
+ num_metadata = len(metadata)
+ quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+ if quota_metadata < num_metadata:
+ pid = context.project_id
+ msg = (_("Quota exceeeded for %(pid)s,"
+ " tried to set %(num_metadata)s metadata properties")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ # Because metadata is stored in the DB, we hard-code the size limits
+ # In future, we may support more variable length strings, so we act
+ # as if this is quota-controlled for forwards compatibility
+ for metadata_item in metadata:
+ k = metadata_item['key']
+ v = metadata_item['value']
+ if len(k) > 255 or len(v) > 255:
+ pid = context.project_id
+ msg = (_("Quota exceeeded for %(pid)s,"
+ " metadata property key or value too long")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
image = self.image_service.show(context, image_id)
if kernel_id is None:
kernel_id = image.get('kernel_id', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdisk_id', None)
+ # FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
@@ -154,6 +179,7 @@ class API(base.Base):
'key_name': key_name,
'key_data': key_data,
'locked': False,
+ 'metadata': metadata,
'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
@@ -293,12 +319,12 @@ class API(base.Base):
try:
instance = self.get(context, instance_id)
except exception.NotFound:
- LOG.warning(_("Instance %d was not found during terminate"),
+ LOG.warning(_("Instance %s was not found during terminate"),
instance_id)
raise
if (instance['state_description'] == 'terminating'):
- LOG.warning(_("Instance %d is already being terminated"),
+ LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
@@ -378,6 +404,10 @@ class API(base.Base):
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
+ def _cast_scheduler_message(self, context, args):
+ """Generic handler for RPC calls to the scheduler"""
+ rpc.cast(context, FLAGS.scheduler_topic, args)
+
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
@@ -394,6 +424,45 @@ class API(base.Base):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ def revert_resize(self, context, instance_id):
+ """Reverts a resize, deleting the 'new' instance in the process"""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('revert_resize', context, instance_id,
+ migration_ref['dest_compute'], params=params)
+
+ def confirm_resize(self, context, instance_id):
+ """Confirms a migration/resize, deleting the 'old' instance in the
+ process."""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+ instance_ref = self.db.instance_get(context, instance_id)
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('confirm_resize', context, instance_id,
+ migration_ref['source_compute'], params=params)
+
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'confirmed'})
+ self.db.instance_update(context, instance_id,
+ {'host': migration_ref['dest_compute'], })
+
+ def resize(self, context, instance_id, flavor):
+ """Resize a running instance."""
+ self._cast_scheduler_message(context,
+ {"method": "prep_resize",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id, }},)
+
def pause(self, context, instance_id):
"""Pause the given instance."""
self._cast_compute_message('pause_instance', context, instance_id)
@@ -447,7 +516,7 @@ class API(base.Base):
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
'port': output['port']}})
- return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
def get_console_output(self, context, instance_id):
@@ -476,6 +545,13 @@ class API(base.Base):
"""
self._cast_compute_message('reset_network', context, instance_id)
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ self._cast_compute_message('inject_network_info', context, instance_id)
+
def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..fa02a5dfa 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -4,6 +4,7 @@
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -21,30 +22,120 @@
The built-in instance properties.
"""
-from nova import flags
+from nova import context
+from nova import db
from nova import exception
+from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
-INSTANCE_TYPES = {
- 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
- 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
- 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
- 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
- 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+LOG = logging.getLogger('nova.instance_types')
+
+
+def create(name, memory, vcpus, local_gb, flavorid, swap=0,
+ rxtx_quota=0, rxtx_cap=0):
+ """Creates instance types / flavors
+ arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap
+ """
+ for option in [memory, vcpus, local_gb, flavorid]:
+ try:
+ int(option)
+ except ValueError:
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+ if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+
+ try:
+ db.instance_type_create(
+ context.get_admin_context(),
+ dict(name=name,
+ memory_mb=memory,
+ vcpus=vcpus,
+ local_gb=local_gb,
+ flavorid=flavorid,
+ swap=swap,
+ rxtx_quota=rxtx_quota,
+ rxtx_cap=rxtx_cap))
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Cannot create instance type: %s" % name))
+
+
+def destroy(name):
+ """Marks instance types / flavors as deleted
+ arguments: name"""
+ if name == None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_destroy(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for deletion' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def purge(name):
+ """Removes instance types / flavors from database
+ arguments: name"""
+ if name == None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_purge(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for purge' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def get_all_types(inactive=0):
+ """Retrieves non-deleted instance_types.
+ Pass true as argument if you want deleted instance types returned also."""
+ return db.instance_type_get_all(context.get_admin_context(), inactive)
+
+
+def get_all_flavors(inactive=0):
+ """Retrieves non-deleted flavors. Alias for instance_types.get_all_types().
+ Pass true as argument if you want deleted instance types returned also."""
+ return get_all_types(inactive)
+
+
+def get_instance_type(name):
+ """Retrieves single instance type by name"""
+ if name is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, name)
+ return inst_type
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
def get_by_type(instance_type):
- """Build instance data structure and save it to the data store."""
+ """retrieve instance type name"""
if instance_type is None:
return FLAGS.default_instance_type
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError(_("Unknown instance type: %s") % \
- instance_type, "Invalid")
- return instance_type
+
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, instance_type)
+ return inst_type['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown instance type: %s" %\
+ instance_type))
def get_by_flavor_id(flavor_id):
- for instance_type, details in INSTANCE_TYPES.iteritems():
- if details['flavorid'] == flavor_id:
- return instance_type
- return FLAGS.default_instance_type
+ """retrieve instance type's name by flavor_id"""
+ if flavor_id is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
+ return flavor['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b8d4b7ee9..b3e864154 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -370,12 +370,19 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
self.network_manager.setup_compute_network(context, instance_id)
- self.driver.rescue(instance_ref)
+ self.driver.rescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -385,11 +392,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- self.driver.unrescue(instance_ref)
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ self.driver.unrescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@staticmethod
@@ -399,6 +413,112 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
@checks_instance_lock
+ def confirm_resize(self, context, instance_id, migration_id):
+ """Destroys the source instance"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ self.driver.destroy(instance_ref)
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def revert_resize(self, context, instance_id, migration_id):
+ """Destroys the new instance on the destination machine,
+ reverts the model changes, and powers on the old
+ instance on the source machine"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+
+ #TODO(mdietz): we may want to split these into separate methods.
+ if migration_ref['source_compute'] == FLAGS.host:
+ self.driver._start(instance_ref)
+ self.db.migration_update(context, migration_id,
+ {'status': 'reverted'})
+ else:
+ self.driver.destroy(instance_ref)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'revert_resize',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def prep_resize(self, context, instance_id):
+ """Initiates the process of moving a running instance to another
+ host, possibly changing the RAM and disk size in the process"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ if instance_ref['host'] == FLAGS.host:
+ raise exception.Error(_(
+ 'Migration error: destination same as source!'))
+
+ migration_ref = self.db.migration_create(context,
+ {'instance_id': instance_id,
+ 'source_compute': instance_ref['host'],
+ 'dest_compute': FLAGS.host,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'status': 'pre-migrating'})
+ LOG.audit(_('instance %s: migrating to '), instance_id,
+ context=context)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'resize_instance',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def resize_instance(self, context, instance_id, migration_id):
+ """Starts the migration of a running instance to another host"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+ self.db.migration_update(context, migration_id,
+ {'status': 'migrating', })
+
+ disk_info = self.driver.migrate_disk_and_power_off(instance_ref,
+ migration_ref['dest_host'])
+ self.db.migration_update(context, migration_id,
+ {'status': 'post-migrating', })
+
+ #TODO(mdietz): This is where we would update the VM record
+ #after resizing
+ service = self.db.service_get_by_host_and_topic(context,
+ migration_ref['dest_compute'], FLAGS.compute_topic)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ migration_ref['dest_compute'])
+ rpc.cast(context, topic,
+ {'method': 'finish_resize',
+ 'args': {
+ 'migration_id': migration_id,
+ 'instance_id': instance_id,
+ 'disk_info': disk_info, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def finish_resize(self, context, instance_id, migration_id, disk_info):
+ """Completes the migration process by setting up the newly transferred
+ disk and turning on the instance on its new host machine"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context,
+ migration_ref['instance_id'])
+
+ # this may get passed into the following spawn instead
+ new_disk_info = self.driver.attach_disk(instance_ref, disk_info)
+ self.driver.spawn(instance_ref, disk=new_disk_info)
+
+ self.db.migration_update(context, migration_id,
+ {'status': 'finished', })
+
+ @exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
@@ -527,6 +647,18 @@ class ComputeManager(manager.Manager):
context=context)
self.driver.reset_network(instance_ref)
+ @checks_instance_lock
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.debug(_('instance %s: inject network info'), instance_id,
+ context=context)
+ self.driver.inject_network_info(instance_ref)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
diff --git a/nova/db/api.py b/nova/db/api.py
index d7f3746d2..2ecfc0211 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -80,13 +80,18 @@ def service_destroy(context, instance_id):
def service_get(context, service_id):
- """Get an service or raise if it does not exist."""
+ """Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
+def service_get_by_host_and_topic(context, host, topic):
+ """Get a service by host it's on and topic it listens to"""
+ return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
def service_get_all(context, disabled=False):
- """Get all service."""
- return IMPL.service_get_all(context, None, disabled)
+ """Get all services."""
+ return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
@@ -254,6 +259,28 @@ def floating_ip_get_by_address(context, address):
####################
+def migration_update(context, id, values):
+ """Update a migration instance"""
+ return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+ """Create a migration record"""
+ return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+ """Finds a migration by the id"""
+ return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_instance_and_status(context, instance_id, status):
+ """Finds a migration by the instance id its migrating"""
+ return IMPL.migration_get_by_instance_and_status(context, instance_id,
+ status)
+
+####################
+
def fixed_ip_associate(context, address, instance_id):
"""Associate fixed ip to instance.
@@ -630,19 +657,24 @@ def iscsi_target_create_safe(context, values):
###############
-def auth_destroy_token(context, token):
+def auth_token_destroy(context, token_id):
"""Destroy an auth token."""
- return IMPL.auth_destroy_token(context, token)
+ return IMPL.auth_token_destroy(context, token_id)
-def auth_get_token(context, token_hash):
+def auth_token_get(context, token_hash):
"""Retrieves a token given the hash representing it."""
- return IMPL.auth_get_token(context, token_hash)
+ return IMPL.auth_token_get(context, token_hash)
-def auth_create_token(context, token):
+def auth_token_update(context, token_hash, values):
+ """Updates a token given the hash representing it."""
+ return IMPL.auth_token_update(context, token_hash, values)
+
+
+def auth_token_create(context, token):
"""Creates a new token."""
- return IMPL.auth_create_token(context, token)
+ return IMPL.auth_token_create(context, token)
###################
@@ -1002,6 +1034,41 @@ def console_get(context, console_id, instance_id=None):
return IMPL.console_get(context, console_id, instance_id)
+ ##################
+
+
+def instance_type_create(context, values):
+ """Create a new instance type"""
+ return IMPL.instance_type_create(context, values)
+
+
+def instance_type_get_all(context, inactive=0):
+ """Get all instance types"""
+ return IMPL.instance_type_get_all(context, inactive)
+
+
+def instance_type_get_by_name(context, name):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_name(context, name)
+
+
+def instance_type_get_by_flavor_id(context, id):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_flavor_id(context, id)
+
+
+def instance_type_destroy(context, name):
+ """Delete a instance type"""
+ return IMPL.instance_type_destroy(context, name)
+
+
+def instance_type_purge(context, name):
+ """Purges (removes) an instance type from DB
+ Use instance_type_destroy for most cases
+ """
+ return IMPL.instance_type_purge(context, name)
+
+
####################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2697fac73..5e498fc6f 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -136,15 +136,12 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, session=None, disabled=False):
- if not session:
- session = get_session()
-
- result = session.query(models.Service).\
+def service_get_all(context, disabled=False):
+ session = get_session()
+ return session.query(models.Service).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(disabled=disabled).\
all()
- return result
@require_admin_context
@@ -158,6 +155,17 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_by_host_and_topic(context, host, topic):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ first()
+
+
+@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
return session.query(models.Service).\
@@ -715,6 +723,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -723,6 +732,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
+ options(joinedload('metadata')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
@@ -1046,7 +1056,8 @@ def network_create_safe(context, values):
@require_admin_context
def network_disassociate(context, network_id):
- network_update(context, network_id, {'project_id': None})
+ network_update(context, network_id, {'project_id': None,
+ 'host': None})
@require_admin_context
@@ -1262,16 +1273,20 @@ def iscsi_target_create_safe(context, values):
@require_admin_context
-def auth_destroy_token(_context, token):
+def auth_token_destroy(context, token_id):
session = get_session()
- session.delete(token)
+ with session.begin():
+ token_ref = auth_token_get(context, token_id, session=session)
+ token_ref.delete(session=session)
@require_admin_context
-def auth_get_token(_context, token_hash):
- session = get_session()
+def auth_token_get(context, token_hash, session=None):
+ if session is None:
+ session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
+ filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
raise exception.NotFound(_('Token %s does not exist') % token_hash)
@@ -1279,7 +1294,16 @@ def auth_get_token(_context, token_hash):
@require_admin_context
-def auth_create_token(_context, token):
+def auth_token_update(context, token_hash, values):
+ session = get_session()
+ with session.begin():
+ token_ref = auth_token_get(context, token_hash, session=session)
+ token_ref.update(values)
+ token_ref.save(session=session)
+
+
+@require_admin_context
+def auth_token_create(_context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
@@ -1959,6 +1983,51 @@ def host_get_networks(context, host):
all()
+###################
+
+
+@require_admin_context
+def migration_create(context, values):
+ migration = models.Migration()
+ migration.update(values)
+ migration.save()
+ return migration
+
+
+@require_admin_context
+def migration_update(context, id, values):
+ session = get_session()
+ with session.begin():
+ migration = migration_get(context, id, session=session)
+ migration.update(values)
+ migration.save(session=session)
+ return migration
+
+
+@require_admin_context
+def migration_get(context, id, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(id=id).first()
+ if not result:
+ raise exception.NotFound(_("No migration found with id %s")
+ % id)
+ return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_id, status):
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(instance_id=instance_id).\
+ filter_by(status=status).first()
+ if not result:
+ raise exception.NotFound(_("No migration found with instance id %s")
+ % instance_id)
+ return result
+
+
##################
@@ -2060,6 +2129,98 @@ def console_get(context, console_id, instance_id=None):
return result
+ ##################
+
+
+@require_admin_context
+def instance_type_create(_context, values):
+ try:
+ instance_type_ref = models.InstanceTypes()
+ instance_type_ref.update(values)
+ instance_type_ref.save()
+    except Exception:
+        raise exception.DBError
+ return instance_type_ref
+
+
+@require_context
+def instance_type_get_all(context, inactive=0):
+ """
+ Returns a dict describing all instance_types with name as key.
+ """
+ session = get_session()
+ if inactive:
+ inst_types = session.query(models.InstanceTypes).\
+ order_by("name").\
+ all()
+ else:
+ inst_types = session.query(models.InstanceTypes).\
+ filter_by(deleted=inactive).\
+ order_by("name").\
+ all()
+ if inst_types:
+ inst_dict = {}
+ for i in inst_types:
+ inst_dict[i['name']] = dict(i)
+ return inst_dict
+ else:
+ raise exception.NotFound
+
+
+@require_context
+def instance_type_get_by_name(context, name):
+ """Returns a dict describing specific instance_type"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(name=name).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No instance type with name %s") % name)
+ else:
+ return dict(inst_type)
+
+
+@require_context
+def instance_type_get_by_flavor_id(context, id):
+ """Returns a dict describing specific flavor_id"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(flavorid=int(id)).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No flavor with name %s") % id)
+ else:
+ return dict(inst_type)
+
+
+@require_admin_context
+def instance_type_destroy(context, name):
+ """ Marks specific instance_type as deleted"""
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.update(dict(deleted=1))
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
+@require_admin_context
+def instance_type_purge(context, name):
+ """ Removes specific instance_type from DB
+ Usually instance_type_destroy should be used
+ """
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.delete()
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
####################
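The new instance_types helpers above follow the usual nova.db calling convention. An illustrative sketch of how they might be exercised, assuming the matching pass-through wrappers in nova.db.api (not shown in this hunk) and an admin context:

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()

    # Create a custom flavor, look it up both ways, then soft-delete it.
    db.instance_type_create(ctxt, dict(name='m1.micro', memory_mb=256, vcpus=1,
                                       local_gb=0, flavorid=6))
    micro = db.instance_type_get_by_flavor_id(ctxt, 6)    # dict for flavorid 6
    by_name = db.instance_type_get_all(ctxt)              # dict keyed by name
    db.instance_type_destroy(ctxt, 'm1.micro')            # marks deleted=1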
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
new file mode 100644
index 000000000..4cb07e0d8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+quotas = Table('quotas', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+
+instance_metadata_table = Table('instance_metadata', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+#
+# New columns
+#
+quota_metadata_items = Column('metadata_items', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (instance_metadata_table, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ quotas.create_column(quota_metadata_items)
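This migration only defines upgrade(). A matching downgrade() is not part of the patch; a minimal sketch, assuming sqlalchemy-migrate's changeset drop()/drop_column() helpers, could look like the following (the same pattern would apply to the 006 and 007 migrations below):

    def downgrade(migrate_engine):
        # Reverse of upgrade(): drop the added column, then the new table.
        meta.bind = migrate_engine
        quotas.drop_column(quota_metadata_items)
        instance_metadata_table.drop()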
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
new file mode 100644
index 000000000..705fc8ff3
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+volumes_provider_location = Column('provider_location',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+volumes_provider_auth = Column('provider_auth',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ volumes.create_column(volumes_provider_location)
+ volumes.create_column(volumes_provider_auth)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
new file mode 100644
index 000000000..427934d53
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -0,0 +1,90 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+fixed_ips = Table(
+ "fixed_ips",
+ meta,
+ Column(
+ "id",
+ Integer(),
+ primary_key=True,
+ nullable=False))
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+fixed_ips_addressV6 = Column(
+ "addressV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_netmaskV6 = Column(
+ "netmaskV6",
+ String(
+ length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_gatewayV6 = Column(
+ "gatewayV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ fixed_ips.create_column(fixed_ips_addressV6)
+ fixed_ips.create_column(fixed_ips_netmaskV6)
+ fixed_ips.create_column(fixed_ips_gatewayV6)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
new file mode 100644
index 000000000..66609054e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import api
+from nova import db
+from nova import log as logging
+
+import datetime
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+instance_types = Table('instance_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('vcpus', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('flavorid', Integer(), nullable=False, unique=True),
+ Column('swap', Integer(), nullable=False, default=0),
+ Column('rxtx_quota', Integer(), nullable=False, default=0),
+ Column('rxtx_cap', Integer(), nullable=False, default=0))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here
+ # Don't create your own engine; bind migrate_engine
+ # to your metadata
+ meta.bind = migrate_engine
+ try:
+ instance_types.create()
+ except Exception:
+        logging.info(repr(instance_types))
+ logging.exception('Exception while creating instance_types table')
+ raise
+
+ # Here are the old static instance types
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ try:
+ i = instance_types.insert()
+ for name, values in INSTANCE_TYPES.iteritems():
+ # FIXME(kpepple) should we be seeding created_at / updated_at ?
+            # now = datetime.datetime.utcnow()
+ i.execute({'name': name, 'memory_mb': values["memory_mb"],
+ 'vcpus': values["vcpus"], 'deleted': 0,
+ 'local_gb': values["local_gb"],
+ 'flavorid': values["flavorid"]})
+ except Exception:
+        logging.info(repr(instance_types))
+ logging.exception('Exception while seeding instance_types table')
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+    for table in (instance_types, ):
+ table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 000000000..4fda525f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+migrations = Table('migrations', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('source_compute', String(255)),
+ Column('dest_compute', String(255)),
+ Column('dest_host', String(255)),
+ Column('instance_id', Integer, ForeignKey('instances.id'),
+ nullable=True),
+ Column('status', String(255)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (migrations, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
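The migrations table created here backs the migration_create/migration_get* helpers added to nova/db/sqlalchemy/api.py earlier in this diff. A hypothetical resize-style flow might record state roughly like this (illustrative only; the instance id, host names, and statuses are made up, and the nova.db wrappers are assumed):

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    migration_ref = db.migration_create(ctxt, {'instance_id': 1,
                                               'source_compute': 'compute-a',
                                               'dest_compute': 'compute-b',
                                               'status': 'pre-migrating'})
    db.migration_update(ctxt, migration_ref.id, {'status': 'post-migrating'})
    pending = db.migration_get_by_instance_and_status(ctxt, 1, 'post-migrating')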
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 9bdaa6d6b..d9e303599 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -60,7 +60,7 @@ def db_version():
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
- 'services',
+ 'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 40a96fc17..6ef284e65 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -126,11 +126,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ onset_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
@@ -210,6 +215,20 @@ class InstanceActions(BASE, NovaBase):
error = Column(Text)
+class InstanceTypes(BASE, NovaBase):
+ """Represent possible instance_types or flavor of VM offered"""
+ __tablename__ = "instance_types"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), unique=True)
+ memory_mb = Column(Integer)
+ vcpus = Column(Integer)
+ local_gb = Column(Integer)
+ flavorid = Column(Integer, unique=True)
+ swap = Column(Integer, nullable=False, default=0)
+ rxtx_quota = Column(Integer, nullable=False, default=0)
+ rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@@ -243,6 +262,9 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ provider_location = Column(String(255))
+ provider_auth = Column(String(255))
+
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
@@ -256,6 +278,7 @@ class Quota(BASE, NovaBase):
volumes = Column(Integer)
gigabytes = Column(Integer)
floating_ips = Column(Integer)
+ metadata_items = Column(Integer)
class ExportDevice(BASE, NovaBase):
@@ -366,6 +389,18 @@ class KeyPair(BASE, NovaBase):
public_key = Column(Text)
+class Migration(BASE, NovaBase):
+ """Represents a running host-to-host migration."""
+ __tablename__ = 'migrations'
+ id = Column(Integer, primary_key=True, nullable=False)
+ source_compute = Column(String(255))
+ dest_compute = Column(String(255))
+ dest_host = Column(String(255))
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+ #TODO(_cerberus_): enum
+ status = Column(String(255))
+
+
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
@@ -433,6 +468,9 @@ class FixedIp(BASE, NovaBase):
allocated = Column(Boolean, default=False)
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
+ addressV6 = Column(String(255))
+ netmaskV6 = Column(String(3))
+ gatewayV6 = Column(String(255))
class User(BASE, NovaBase):
@@ -536,6 +574,20 @@ class Console(BASE, NovaBase):
pool = relationship(ConsolePool, backref=backref('consoles'))
+class InstanceMetadata(BASE, NovaBase):
+ """Represents a metadata key/value pair for an instance"""
+ __tablename__ = 'instance_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance = relationship(Instance, backref="metadata",
+ foreign_keys=instance_id,
+ primaryjoin='and_('
+ 'InstanceMetadata.instance_id == Instance.id,'
+ 'InstanceMetadata.deleted == False)')
+
+
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -553,11 +605,12 @@ def register_models():
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (Service, Instance, InstanceActions,
+ models = (Service, Instance, InstanceActions, InstanceTypes,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate, ConsolePool, Console, Zone)
+ Project, Certificate, ConsolePool, Console, Zone,
+ InstanceMetadata, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
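For reference, the new InstanceMetadata relationship is what the joinedload('metadata') options added to instance_get/instance_get_all above rely on. A sketch of querying it directly (illustrative, not code from this patch):

    from sqlalchemy.orm import joinedload

    from nova.db.sqlalchemy import models
    from nova.db.sqlalchemy.session import get_session

    session = get_session()
    instance = session.query(models.Instance).\
                       options(joinedload('metadata')).\
                       filter_by(id=1).\
                       first()
    # The backref yields InstanceMetadata rows; flatten them into a plain dict.
    meta = dict((item.key, item.value) for item in instance.metadata)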
diff --git a/nova/flags.py b/nova/flags.py
index f64a62da9..8cf199b2f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -160,9 +160,45 @@ class StrWrapper(object):
raise KeyError(name)
-FLAGS = FlagValues()
-gflags.FLAGS = FLAGS
-gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
+# Copied from gflags with small mods to get the naming correct.
+# Originally gflags checks for the first module that is not gflags that is
+# in the call chain, we want to check for the first module that is not gflags
+# and not this module.
+def _GetCallingModule():
+ """Returns the name of the module that's calling into this module.
+
+ We generally use this function to get the name of the module calling a
+ DEFINE_foo... function.
+ """
+ # Walk down the stack to find the first globals dict that's not ours.
+ for depth in range(1, sys.getrecursionlimit()):
+ if not sys._getframe(depth).f_globals is globals():
+ module_name = __GetModuleName(sys._getframe(depth).f_globals)
+ if module_name == 'gflags':
+ continue
+ if module_name is not None:
+ return module_name
+ raise AssertionError("No module was found")
+
+
+# Copied from gflags because it is a private function
+def __GetModuleName(globals_dict):
+ """Given a globals dict, returns the name of the module that defines it.
+
+ Args:
+ globals_dict: A dictionary that should correspond to an environment
+ providing the values of the globals.
+
+ Returns:
+ A string (the name of the module) or None (if the module could not
+      be identified).
+ """
+ for name, module in sys.modules.iteritems():
+ if getattr(module, '__dict__', None) is globals_dict:
+ if name == '__main__':
+ return sys.argv[0]
+ return name
+ return None
def _wrapper(func):
@@ -173,6 +209,11 @@ def _wrapper(func):
return _wrapped
+FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
+gflags._GetCallingModule = _GetCallingModule
+
+
DEFINE = _wrapper(gflags.DEFINE)
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
@@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
DEFINE_flag = _wrapper(gflags.DEFINE_flag)
-
-
HelpFlag = gflags.HelpFlag
HelpshortFlag = gflags.HelpshortFlag
HelpXMLFlag = gflags.HelpXMLFlag
@@ -285,8 +324,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
+DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_string('sql_connection',
- 'sqlite:///$state_path/nova.sqlite',
+ 'sqlite:///$state_path/$sqlite_db',
'connection string for sql database')
DEFINE_integer('sql_idle_timeout',
3600,
diff --git a/nova/log.py b/nova/log.py
index 10c14d74b..87a21ddb4 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -54,7 +54,7 @@ flags.DEFINE_string('logging_default_format_string',
'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix',
- 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ 'from (pid=%(process)d) %(funcName)s'
' %(pathname)s:%(lineno)d',
'data to append to log format when level is DEBUG')
@@ -236,16 +236,17 @@ class NovaRootLogger(NovaLogger):
def __init__(self, name, level=NOTSET):
self.logpath = None
self.filelog = None
- self.syslog = SysLogHandler(address='/dev/log')
self.streamlog = StreamHandler()
+ self.syslog = None
NovaLogger.__init__(self, name, level)
def setup_from_flags(self):
"""Setup logger from flags"""
global _filelog
if FLAGS.use_syslog:
+ self.syslog = SysLogHandler(address='/dev/log')
self.addHandler(self.syslog)
- else:
+ elif self.syslog:
self.removeHandler(self.syslog)
logpath = _get_log_file_path()
if logpath:
diff --git a/nova/network/manager.py b/nova/network/manager.py
index c6eba225e..b36dd59cf 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -163,11 +163,22 @@ class NetworkManager(manager.Manager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
- raise NotImplementedError()
+ # TODO(vish): when this is called by compute, we can associate compute
+ # with a network, or a cluster of computes with a network
+ # and use that network here with a method like
+ # network_get_by_compute_host
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network_ref['id'],
+ instance_id)
+ self.db.fixed_ip_update(context, address, {'allocated': True})
+ return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool."""
- raise NotImplementedError()
+ self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_fixed_ip(self, context, address):
"""Sets up rules for fixed ip."""
@@ -257,12 +268,58 @@ class NetworkManager(manager.Manager):
def get_network_host(self, context):
"""Get the network host for the current context."""
- raise NotImplementedError()
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ # NOTE(vish): If the network has no host, use the network_host flag.
+        #             This could eventually be a db lookup of some sort, but
+ # a flag is easy to handle for now.
+ host = network_ref['host']
+ if not host:
+ topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ FLAGS.network_host)
+ if FLAGS.fake_call:
+ return self.set_network_host(context, network_ref['id'])
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
+ cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
- raise NotImplementedError()
+ fixed_net = IPy.IP(cidr)
+ fixed_net_v6 = IPy.IP(cidr_v6)
+ significant_bits_v6 = 64
+ count = 1
+ for index in range(num_networks):
+ start = index * network_size
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['bridge'] = FLAGS.flat_network_bridge
+ net['dns'] = FLAGS.flat_network_dns
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['dhcp_start'] = str(project_net[2])
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, count)
+ else:
+ net['label'] = label
+ count += 1
+
+ if(FLAGS.use_ipv6):
+ cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
+ net['cidr_v6'] = cidr_v6
+
+ network_ref = self.db.network_create_safe(context, net)
+
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
@property
def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
@@ -322,83 +379,20 @@ class FlatManager(NetworkManager):
"""
timeout_fixed_ips = False
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool."""
- # TODO(vish): when this is called by compute, we can associate compute
- # with a network, or a cluster of computes with a network
- # and use that network here with a method like
- # network_get_by_compute_host
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network_ref['id'],
- instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
- return address
-
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
+ def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+        # Fix for bug 723298 - do not call init_host on the superclass.
+        # The following code has been copied from NetworkManager.init_host.
+ ctxt = context.get_admin_context()
+ for network in self.db.host_get_networks(ctxt, self.host):
+ self._on_set_network_host(ctxt, network['id'])
def setup_compute_network(self, context, instance_id):
"""Network is created manually."""
pass
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, *args, **kwargs):
- """Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
- significant_bits_v6 = 64
- count = 1
- for index in range(num_networks):
- start = index * network_size
- significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
- net = {}
- net['bridge'] = FLAGS.flat_network_bridge
- net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
- if num_networks > 1:
- net['label'] = "%s_%d" % (label, count)
- else:
- net['label'] = label
- count += 1
-
- if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- network_ref = self.db.network_create_safe(context, net)
-
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network host for the current context."""
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- # NOTE(vish): If the network has no host, use the network_host flag.
- # This could eventually be a a db lookup of some sort, but
- # a flag is easy to handle for now.
- host = network_ref['host']
- if not host:
- topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- FLAGS.network_host)
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return host
-
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network."""
net = {}
@@ -406,8 +400,24 @@ class FlatManager(NetworkManager):
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
+ def allocate_floating_ip(self, context, project_id):
+ #Fix for bug 723298
+ raise NotImplementedError()
-class FlatDHCPManager(FlatManager):
+ def associate_floating_ip(self, context, floating_address, fixed_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def disassociate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def deallocate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+
+class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
@@ -521,6 +531,11 @@ class VlanManager(NetworkManager):
' than 4094'))
fixed_net = IPy.IP(cidr)
+ if fixed_net.len() < num_networks * network_size:
+            raise ValueError(_('The network range is not big enough to fit '
+                             '%(num_networks)s networks. Network size is '
+                             '%(network_size)s') % locals())
+
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
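The new range check in VlanManager.create_networks is plain arithmetic: the fixed range must contain at least num_networks * network_size addresses. A worked example with illustrative values:

    import IPy

    fixed_net = IPy.IP('10.0.0.0/26')    # 64 addresses in the range
    num_networks, network_size = 5, 16   # 5 * 16 = 80 addresses required
    # 64 < 80, so create_networks would raise the ValueError above.
    print fixed_net.len() < num_networks * network_size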
diff --git a/nova/quota.py b/nova/quota.py
index 3884eb308..6b52a97fa 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -35,6 +35,8 @@ flags.DEFINE_integer('quota_gigabytes', 1000,
'number of volume gigabytes allowed per project')
flags.DEFINE_integer('quota_floating_ips', 10,
'number of floating ips allowed per project')
+flags.DEFINE_integer('quota_metadata_items', 128,
+ 'number of metadata items allowed per instance')
def get_quota(context, project_id):
@@ -42,7 +44,8 @@ def get_quota(context, project_id):
'cores': FLAGS.quota_cores,
'volumes': FLAGS.quota_volumes,
'gigabytes': FLAGS.quota_gigabytes,
- 'floating_ips': FLAGS.quota_floating_ips}
+ 'floating_ips': FLAGS.quota_floating_ips,
+ 'metadata_items': FLAGS.quota_metadata_items}
try:
quota = db.quota_get(context, project_id)
for key in rval.keys():
@@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips):
return min(num_floating_ips, allowed_floating_ips)
+def allowed_metadata_items(context, num_metadata_items):
+ """Check quota; return min(num_metadata_items,allowed_metadata_items)"""
+ project_id = context.project_id
+ context = context.elevated()
+ quota = get_quota(context, project_id)
+ num_allowed_metadata_items = quota['metadata_items']
+ return min(num_metadata_items, num_allowed_metadata_items)
+
+
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
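As with the other allowed_* helpers, allowed_metadata_items only clamps the requested count against the project quota; enforcement is left to the caller. A hypothetical check (a sketch only, not the actual compute API code):

    from nova import quota


    def check_metadata_quota(context, metadata):
        """Reject a request whose metadata dict exceeds the project quota."""
        requested = len(metadata)
        allowed = quota.allowed_metadata_items(context, requested)
        if allowed < requested:
            # QuotaError subclasses ApiError; the message text is illustrative.
            raise quota.QuotaError("Metadata limit exceeded")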
diff --git a/nova/rpc.py b/nova/rpc.py
index 205bb524a..fbb90299b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -91,18 +91,19 @@ class Consumer(messaging.Consumer):
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
- except: # Catching all because carrot sucks
+ except Exception as e: # Catching all because carrot sucks
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval
- LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
- " unreachable. Trying again in %(fl_intv)d seconds.")
+ LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable: %(e)s. Trying again in %(fl_intv)d"
+ " seconds.")
% locals())
self.failed_connection = True
if self.failed_connection:
- LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
- FLAGS.rabbit_max_retries)
+ LOG.error(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -122,7 +123,7 @@ class Consumer(messaging.Consumer):
LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
- # exceptions to be logged 10 times a second if some
+ # want exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
diff --git a/nova/service.py b/nova/service.py
index cc88ac233..8fdaca0a5 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -39,20 +40,21 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import version
-
+from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
lower_bound=1)
-
flags.DEFINE_integer('periodic_interval', 60,
'seconds between running periodic tasks',
lower_bound=1)
-
-flags.DEFINE_flag(flags.HelpFlag())
-flags.DEFINE_flag(flags.HelpshortFlag())
-flags.DEFINE_flag(flags.HelpXMLFlag())
+flags.DEFINE_string('ec2_listen', "0.0.0.0",
+ 'IP address for EC2 API to listen')
+flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
+flags.DEFINE_string('osapi_listen', "0.0.0.0",
+ 'IP address for OpenStack API to listen')
+flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
class Service(object):
@@ -64,6 +66,8 @@ class Service(object):
self.binary = binary
self.topic = topic
self.manager_class_name = manager
+ manager_class = utils.import_class(self.manager_class_name)
+ self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
super(Service, self).__init__(*args, **kwargs)
@@ -71,9 +75,9 @@ class Service(object):
self.timers = []
def start(self):
- manager_class = utils.import_class(self.manager_class_name)
- self.manager = manager_class(host=self.host, *self.saved_args,
- **self.saved_kwargs)
+ vcs_string = version.version_string_with_vcs()
+ logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"),
+ {'topic': self.topic, 'vcs_string': vcs_string})
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
@@ -153,9 +157,6 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- vcs_string = version.version_string_with_vcs()
- logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
- % locals())
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -216,9 +217,48 @@ class Service(object):
logging.exception(_("model server went away"))
+class WsgiService(object):
+ """Base class for WSGI based services."""
+
+ def __init__(self, conf, apis):
+ self.conf = conf
+ self.apis = apis
+ self.wsgi_app = None
+
+ def start(self):
+ self.wsgi_app = _run_wsgi(self.conf, self.apis)
+
+ def wait(self):
+ self.wsgi_app.wait()
+
+
+class ApiService(WsgiService):
+ """Class for our nova-api service"""
+ @classmethod
+ def create(cls, conf=None):
+ if not conf:
+ conf = wsgi.paste_config_file('nova-api.conf')
+ if not conf:
+ raise exception.Error(_("Cannot load nova-api.conf"))
+ api_endpoints = ['ec2', 'osapi']
+ service = cls(conf, api_endpoints)
+ return service
+
+
def serve(*services):
- if not services:
- services = [Service.create()]
+ try:
+ if not services:
+ services = [Service.create()]
+ except Exception:
+ logging.exception('in Service.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
@@ -231,6 +271,49 @@ def serve(*services):
x.start()
+def serve_wsgi(cls, conf):
+ try:
+ service = cls.create(conf)
+ except Exception:
+ logging.exception('in WsgiService.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
+
+ service.start()
+
+ return service
+
+
def wait():
while True:
greenthread.sleep(5)
+
+
+def _run_wsgi(paste_config_file, apis):
+ logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
+ apps = []
+ for api in apis:
+ config = wsgi.load_paste_configuration(paste_config_file, api)
+ if config is None:
+ logging.debug(_("No paste configuration for app: %s"), api)
+ continue
+ logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
+ logging.info(_("Running %s API"), api)
+ app = wsgi.load_paste_app(paste_config_file, api)
+ apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
+ getattr(FLAGS, "%s_listen" % api)))
+ if len(apps) == 0:
+ logging.error(_("No known API applications configured in %s."),
+ paste_config_file)
+ return
+
+ server = wsgi.Server()
+ for app in apps:
+ server.start(*app)
+ return server
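serve_wsgi() is the counterpart of serve() for the new WSGI-based services. A hypothetical launcher along the lines of bin/nova-api (the real script differs in detail) might look like:

    import sys

    from nova import flags
    from nova import service

    if __name__ == '__main__':
        FLAGS = flags.FLAGS
        FLAGS(sys.argv)   # initial flag parse; ParseNewFlags() runs in serve_wsgi
        api_service = service.serve_wsgi(service.ApiService, conf=None)
        api_service.wait()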
diff --git a/nova/test.py b/nova/test.py
index e0e203647..d8a47464f 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,26 +22,28 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
+
import datetime
+import os
+import shutil
import uuid
import unittest
import mox
import stubout
from nova import context
from nova import db
from nova import fakerabbit
from nova import flags
-from nova import log as logging
from nova import rpc
from nova import service
-from nova.network import manager as network_manager
FLAGS = flags.FLAGS
-flags.DEFINE_bool('flush_db', True,
- 'Flush the database before running fake tests')
+flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite',
+ 'File name of clean sqlite db')
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
@@ -66,15 +68,8 @@ class TestCase(unittest.TestCase):
# now that we have some required db setup for the system
# to work properly.
self.start = datetime.datetime.utcnow()
- ctxt = context.get_admin_context()
- if db.network_count(ctxt) != 5:
- network_manager.VlanManager().create_networks(ctxt,
- FLAGS.fixed_range,
- 5, 16,
- FLAGS.fixed_range_v6,
- FLAGS.vlan_start,
- FLAGS.vpn_start,
- )
+ shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
+ os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
@@ -96,17 +91,6 @@ class TestCase(unittest.TestCase):
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
- try:
- # Clean up any ips associated during the test.
- ctxt = context.get_admin_context()
- db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
- self.start)
- db.network_disassociate_all(ctxt)
-
- db.security_group_destroy_all(ctxt)
- except Exception:
- pass
-
# Clean out fake_rabbit's queue if we used it
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 592d5bea9..7fba02a93 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -37,5 +37,30 @@ setattr(__builtin__, '_', lambda x: x)
def setup():
+ import os
+ import shutil
+
+ from nova import context
+ from nova import flags
from nova.db import migration
+ from nova.network import manager as network_manager
+ from nova.tests import fake_flags
+
+ FLAGS = flags.FLAGS
+
+ testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
+ if os.path.exists(testdb):
+ os.unlink(testdb)
migration.db_sync()
+ ctxt = context.get_admin_context()
+ network_manager.VlanManager().create_networks(ctxt,
+ FLAGS.fixed_range,
+ FLAGS.num_networks,
+ FLAGS.network_size,
+ FLAGS.fixed_range_v6,
+ FLAGS.vlan_start,
+ FLAGS.vpn_start,
+ )
+
+ cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
+ shutil.copyfile(testdb, cleandb)
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index 77b1dd37f..e18120285 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -16,7 +16,7 @@
# under the License.
import webob.dec
-import unittest
+from nova import test
from nova import context
from nova import flags
@@ -33,7 +33,7 @@ def simple_wsgi(req):
return ""
-class RateLimitingMiddlewareTest(unittest.TestCase):
+class RateLimitingMiddlewareTest(test.TestCase):
def test_get_action_name(self):
middleware = RateLimitingMiddleware(simple_wsgi)
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
new file mode 100644
index 000000000..3f9c7d3cf
--- /dev/null
+++ b/nova/tests/api/openstack/common.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import webob
+
+
+def webob_factory(url):
+ """Factory for removing duplicate webob code from tests"""
+
+ base_url = url
+
+ def web_request(url, method=None, body=None):
+ req = webob.Request.blank("%s%s" % (base_url, url))
+ if method:
+ req.method = method
+ if body:
+ req.body = json.dumps(body)
+ return req
+ return web_request
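A hypothetical use of the factory inside a test module; the URL and body here are made up:

    from nova.tests.api.openstack import common

    request = common.webob_factory('/v1.0/servers')
    req = request('/1/action', method='POST',
                  body=dict(resize=dict(flavorId=2)))
    # req is a webob.Request with req.body set to the JSON-encoded dict.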
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index e0b7b8029..49ce8c1b5 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -188,7 +188,11 @@ def stub_out_glance(stubs, initial_fixtures=None):
class FakeToken(object):
+ id = 0
+
def __init__(self, **kwargs):
+ FakeToken.id += 1
+ self.id = FakeToken.id
for k, v in kwargs.iteritems():
setattr(self, k, v)
@@ -203,26 +207,28 @@ class FakeAuthDatabase(object):
data = {}
@staticmethod
- def auth_get_token(context, token_hash):
+ def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
- def auth_create_token(context, token):
+ def auth_token_create(context, token):
fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+ FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
- def auth_destroy_token(context, token):
- if token.token_hash in FakeAuthDatabase.data:
- del FakeAuthDatabase.data['token_hash']
+ def auth_token_destroy(context, token_id):
+ token = FakeAuthDatabase.data.get('id_%i' % token_id)
+ if token and token.token_hash in FakeAuthDatabase.data:
+ del FakeAuthDatabase.data[token.token_hash]
+ del FakeAuthDatabase.data['id_%i' % token_id]
class FakeAuthManager(object):
auth_data = {}
- def add_user(self, user):
- key = user.id
+ def add_user(self, key, user):
FakeAuthManager.auth_data[key] = user
def get_user(self, uid):
@@ -235,10 +241,7 @@ class FakeAuthManager(object):
return None
def get_user_from_access_key(self, key):
- for k, v in FakeAuthManager.auth_data.iteritems():
- if v.access == key:
- return v
- return None
+ return FakeAuthManager.auth_data.get(key, None)
class FakeRateLimiter(object):
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index 73120c31d..dfce1b127 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -15,13 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import stubout
import webob
from paste import urlmap
from nova import flags
+from nova import test
from nova.api import openstack
from nova.api.openstack import ratelimiting
from nova.api.openstack import auth
@@ -30,9 +30,10 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-class AdminAPITest(unittest.TestCase):
+class AdminAPITest(test.TestCase):
def setUp(self):
+ super(AdminAPITest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -44,6 +45,7 @@ class AdminAPITest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(AdminAPITest, self).tearDown()
def test_admin_enabled(self):
FLAGS.allow_admin_api = True
@@ -58,8 +60,5 @@ class AdminAPITest(unittest.TestCase):
# We should still be able to access public operations.
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
# TODO: Confirm admin operations are unavailable.
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertEqual(res.status_int, 200)
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index db0fe1060..5112c486f 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -15,17 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob.exc
import webob.dec
from webob import Request
+from nova import test
from nova.api import openstack
from nova.api.openstack import faults
-class APITest(unittest.TestCase):
+class APITest(test.TestCase):
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index eab78b50c..ff8d42a14 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -16,7 +16,6 @@
# under the License.
import datetime
-import unittest
import stubout
import webob
@@ -27,12 +26,15 @@ import nova.api.openstack.auth
import nova.auth.manager
from nova import auth
from nova import context
+from nova import db
+from nova import test
from nova.tests.api.openstack import fakes
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def setUp(self):
+ super(Test, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
@@ -45,10 +47,11 @@ class Test(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(Test, self).tearDown()
def test_authorize_user(self):
f = fakes.FakeAuthManager()
- f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None))
+ f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
@@ -62,7 +65,7 @@ class Test(unittest.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None))
+ f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'herp'
@@ -97,10 +100,10 @@ class Test(unittest.TestCase):
token_hash=token_hash,
created_at=datetime.datetime(1990, 1, 1))
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy',
destroy_token_mock)
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get',
bad_token)
req = webob.Request.blank('/v1.0/')
@@ -128,8 +131,36 @@ class Test(unittest.TestCase):
self.assertEqual(result.status, '401 Unauthorized')
-class TestLimiter(unittest.TestCase):
+class TestFunctional(test.TestCase):
+ def test_token_expiry(self):
+ ctx = context.get_admin_context()
+ tok = db.auth_token_create(ctx, dict(
+ token_hash='bacon',
+ cdn_management_url='',
+ server_management_url='',
+ storage_url='',
+ user_id='ham',
+ ))
+
+ db.auth_token_update(ctx, tok.token_hash, dict(
+ created_at=datetime.datetime(2000, 1, 1, 12, 0, 0),
+ ))
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'bacon'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_token_doesnotexist(self):
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'ham'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+
+class TestLimiter(test.TestCase):
def setUp(self):
+ super(TestLimiter, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
@@ -141,10 +172,11 @@ class TestLimiter(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(TestLimiter, self).tearDown()
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None))
+ f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
@@ -161,7 +193,3 @@ class TestLimiter(unittest.TestCase):
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 9d9837cc9..92023362c 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -19,14 +19,15 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
-import unittest
+import webob.exc
from webob import Request
+from nova import test
from nova.api.openstack.common import limited
-class LimiterTest(unittest.TestCase):
+class LimiterTest(test.TestCase):
"""
Unit tests for the `nova.api.openstack.common.limited` method which takes
in a list of items and, depending on the 'offset' and 'limit' GET params,
@@ -37,6 +38,7 @@ class LimiterTest(unittest.TestCase):
"""
Run before each test.
"""
+ super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
@@ -159,3 +161,23 @@ class LimiterTest(unittest.TestCase):
self.assertEqual(limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
self.assertEqual(limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ """
+ Test a negative limit.
+ """
+ def _limit_large():
+ limited(self.large, req, max_limit=2000)
+
+ req = Request.blank('/?limit=-3000')
+ self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+
+ def test_limiter_negative_offset(self):
+ """
+ Test a negative offset.
+ """
+ def _limit_large():
+ limited(self.large, req, max_limit=2000)
+
+ req = Request.blank('/?offset=-30')
+ self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index fda2b5ede..7667753f4 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -15,15 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob
import webob.dec
import webob.exc
+from nova import test
from nova.api.openstack import faults
-class TestFaults(unittest.TestCase):
+class TestFaults(test.TestCase):
def test_fault_parts(self):
req = webob.Request.blank('/.xml')
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 1bdaea161..319767bb5 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -15,34 +15,38 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import stubout
import webob
+from nova import test
import nova.api
+from nova import context
+from nova import db
from nova.api.openstack import flavors
from nova.tests.api.openstack import fakes
-class FlavorsTest(unittest.TestCase):
+class FlavorsTest(test.TestCase):
def setUp(self):
+ super(FlavorsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
+ self.context = context.get_admin_context()
def tearDown(self):
self.stubs.UnsetAll()
+ super(FlavorsTest, self).tearDown()
def test_get_flavor_list(self):
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
def test_get_flavor_by_id(self):
- pass
-
-if __name__ == '__main__':
- unittest.main()
+ req = webob.Request.blank('/v1.0/flavors/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 8ab4d7569..e232bc3d5 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import json
import datetime
-import unittest
import stubout
import webob
@@ -30,6 +29,7 @@ import webob
from nova import context
from nova import exception
from nova import flags
+from nova import test
from nova import utils
import nova.api.openstack
from nova.api.openstack import images
@@ -130,12 +130,13 @@ class BaseImageServiceTests(object):
self.assertEquals(1, num_images)
-class LocalImageServiceTest(unittest.TestCase,
+class LocalImageServiceTest(test.TestCase,
BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
+ super(LocalImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
@@ -145,14 +146,16 @@ class LocalImageServiceTest(unittest.TestCase,
self.service.delete_all()
self.service.delete_imagedir()
self.stubs.UnsetAll()
+ super(LocalImageServiceTest, self).tearDown()
-class GlanceImageServiceTest(unittest.TestCase,
+class GlanceImageServiceTest(test.TestCase,
BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
+ super(GlanceImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
@@ -163,9 +166,10 @@ class GlanceImageServiceTest(unittest.TestCase,
def tearDown(self):
self.stubs.UnsetAll()
+ super(GlanceImageServiceTest, self).tearDown()
-class ImageControllerWithGlanceServiceTest(unittest.TestCase):
+class ImageControllerWithGlanceServiceTest(test.TestCase):
"""Test of the OpenStack API /images application controller"""
@@ -194,6 +198,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'image_type': 'ramdisk'}]
def setUp(self):
+ super(ImageControllerWithGlanceServiceTest, self).setUp()
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
@@ -208,6 +213,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.image_service = self.orig_image_service
+ super(ImageControllerWithGlanceServiceTest, self).tearDown()
def test_get_image_index(self):
req = webob.Request.blank('/v1.0/images')
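The Glance-backed controller test above also shows the flag save/restore idiom that pairs with the new super() calls: the original FLAGS.image_service is captured after the base setUp() and put back before the base tearDown(). Compressed to just that idiom (the class name is illustrative):

    class GlanceBackedTest(test.TestCase):
        def setUp(self):
            super(GlanceBackedTest, self).setUp()
            self.orig_image_service = FLAGS.image_service
            FLAGS.image_service = 'nova.image.glance.GlanceImageService'

        def tearDown(self):
            FLAGS.image_service = self.orig_image_service
            super(GlanceBackedTest, self).tearDown()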
diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py
index 4c9d6bc23..9ae90ee20 100644
--- a/nova/tests/api/openstack/test_ratelimiting.py
+++ b/nova/tests/api/openstack/test_ratelimiting.py
@@ -1,15 +1,16 @@
import httplib
import StringIO
import time
-import unittest
import webob
+from nova import test
import nova.api.openstack.ratelimiting as ratelimiting
-class LimiterTest(unittest.TestCase):
+class LimiterTest(test.TestCase):
def setUp(self):
+ super(LimiterTest, self).setUp()
self.limits = {
'a': (5, ratelimiting.PER_SECOND),
'b': (5, ratelimiting.PER_MINUTE),
@@ -83,9 +84,10 @@ class FakeLimiter(object):
return self._delay
-class WSGIAppTest(unittest.TestCase):
+class WSGIAppTest(test.TestCase):
def setUp(self):
+ super(WSGIAppTest, self).setUp()
self.limiter = FakeLimiter(self)
self.app = ratelimiting.WSGIApp(self.limiter)
@@ -206,7 +208,7 @@ def wire_HTTPConnection_to_WSGI(host, app):
httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
-class WSGIAppProxyTest(unittest.TestCase):
+class WSGIAppProxyTest(test.TestCase):
def setUp(self):
"""Our WSGIAppProxy is going to call across an HTTPConnection to a
@@ -218,6 +220,7 @@ class WSGIAppProxyTest(unittest.TestCase):
at the WSGIApp. And the limiter isn't real -- it's a fake that
behaves the way we tell it to.
"""
+ super(WSGIAppProxyTest, self).setUp()
self.limiter = FakeLimiter(self)
app = ratelimiting.WSGIApp(self.limiter)
wire_HTTPConnection_to_WSGI('100.100.100.100:80', app)
@@ -238,7 +241,3 @@ class WSGIAppProxyTest(unittest.TestCase):
self.limiter.mock('murder', 'brutus', None)
self.proxy.perform('stab', 'brutus')
self.assertRaises(AssertionError, shouldRaise)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index a7be0796e..c9566c7e6 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,18 +17,21 @@
import datetime
import json
-import unittest
import stubout
import webob
from nova import db
from nova import flags
+from nova import test
import nova.api.openstack
from nova.api.openstack import servers
+import nova.compute.api
import nova.db.api
from nova.db.sqlalchemy.models import Instance
+from nova.db.sqlalchemy.models import InstanceMetadata
import nova.rpc
+from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
@@ -64,6 +67,9 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+ metadata = []
+ metadata.append(InstanceMetadata(key='seq', value=id))
+
if public_addresses == None:
public_addresses = list()
@@ -84,7 +90,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"vcpus": 0,
"local_gb": 0,
"hostname": "",
- "host": "",
+ "host": None,
"instance_type": "",
"user_data": "",
"reservation_id": "",
@@ -95,7 +101,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"availability_zone": "",
"display_name": "server%s" % id,
"display_description": "",
- "locked": False}
+ "locked": False,
+ "metadata": metadata}
instance["fixed_ip"] = {
"address": private_address,
@@ -108,9 +115,10 @@ def fake_compute_api(cls, req, id):
return True
-class ServersTest(unittest.TestCase):
+class ServersTest(test.TestCase):
def setUp(self):
+ super(ServersTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -138,9 +146,12 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
+ self.webreq = common.webob_factory('/v1.0/servers')
+
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(ServersTest, self).tearDown()
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -214,7 +225,8 @@ class ServersTest(unittest.TestCase):
"get_image_id_from_image_hash", image_id_from_hash)
body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
+ name='server_test', imageId=2, flavorId=2,
+ metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
@@ -289,10 +301,45 @@ class ServersTest(unittest.TestCase):
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
+ self.assertEqual(s['metadata']['seq'], i)
i += 1
+ def test_get_all_server_details_with_host(self):
+ """
+ Instances on the same host should report the same hostId, and
+ instances on different hosts should report different hostIds.
+ This test uses five instances: two on one host and three on another.
+ """
+
+ def stub_instance(id, user_id=1):
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, host='host%s' % (id % 2))
+
+ def return_servers_with_host(context, user_id=1):
+ return [stub_instance(i) for i in xrange(5)]
+
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ return_servers_with_host)
+
+ req = webob.Request.blank('/v1.0/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], host_ids[i % 2])
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageId'], 10)
+
def test_server_pause(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(
@@ -353,6 +400,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_server_inject_network_info(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/inject_network_info')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET"
@@ -411,6 +470,98 @@ class ServersTest(unittest.TestCase):
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
+ def test_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_bad_flavor_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 422)
+ self.assertEqual(self.resize_called, False)
+
+ def test_resize_raises_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ def resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_confirm_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ self.resize_called = False
+
+ def confirm_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
+ self.assertEqual(self.resize_called, True)
+
+ def test_confirm_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ def confirm_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_revert_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ self.resize_called = False
+
+ def revert_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_revert_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ def revert_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
if __name__ == "__main__":
unittest.main()
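The new resize/confirm/revert tests in test_servers.py all follow one shape: build an action request with the common.webob_factory helper wired up in setUp(), swap the relevant compute API method for a recording stub, then assert on the WSGI status code. A condensed sketch of that shape, using only helpers that already appear in this patch (the test name and the "called" list are illustrative):

    def test_action_calls_compute_api(self):
        called = []

        def fake_resize(*args):
            called.append(args)

        self.stubs.Set(nova.compute.api.API, 'resize', fake_resize)

        req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(res.status_int, 202)
        self.assertTrue(called)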
diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index c2fc3a203..b4de2ef41 100644
--- a/nova/tests/api/openstack/test_shared_ip_groups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -15,19 +15,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import stubout
+from nova import test
from nova.api.openstack import shared_ip_groups
-class SharedIpGroupsTest(unittest.TestCase):
+class SharedIpGroupsTest(test.TestCase):
def setUp(self):
+ super(SharedIpGroupsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.stubs.UnsetAll()
+ super(SharedIpGroupsTest, self).tearDown()
def test_get_shared_ip_groups(self):
pass
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index df497ef1b..555b206b9 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import stubout
import webob
@@ -22,6 +21,7 @@ import json
import nova.db
from nova import context
from nova import flags
+from nova import test
from nova.api.openstack import zones
from nova.tests.api.openstack import fakes
@@ -60,8 +60,9 @@ def zone_get_all(context):
password='qwerty')]
-class ZonesTest(unittest.TestCase):
+class ZonesTest(test.TestCase):
def setUp(self):
+ super(ZonesTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
@@ -81,6 +82,7 @@ class ZonesTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(ZonesTest, self).tearDown()
def test_get_zone_list(self):
req = webob.Request.blank('/v1.0/zones')
@@ -134,7 +136,3 @@ class ZonesTest(unittest.TestCase):
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('username' in res_dict['zone'])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 44e2d615c..2c7852214 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -21,7 +21,7 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
-import unittest
+from nova import test
import routes
import webob
@@ -29,7 +29,7 @@ import webob
from nova import wsgi
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def test_debug(self):
@@ -92,7 +92,7 @@ class Test(unittest.TestCase):
self.assertNotEqual(result.body, "123")
-class SerializerTest(unittest.TestCase):
+class SerializerTest(test.TestCase):
def match(self, url, accept, expect):
input_dict = dict(servers=dict(a=(2, 3)))
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 05bdd172e..d760dc456 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -20,13 +20,22 @@
import time
from nova import db
+from nova import test
from nova import utils
-from nova.compute import instance_types
def stub_out_db_instance_api(stubs):
""" Stubs out the db API for creating Instances """
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
class FakeModel(object):
""" Stubs out for model """
def __init__(self, values):
@@ -41,10 +50,16 @@ def stub_out_db_instance_api(stubs):
else:
raise NotImplementedError()
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
def fake_instance_create(values):
""" Stubs out the db.instance_create method """
- type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
+ type_data = INSTANCE_TYPES[values['instance_type']]
base_options = {
'name': values['name'],
@@ -73,3 +88,5 @@ def stub_out_db_instance_api(stubs):
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
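With the fake db layer carrying its own INSTANCE_TYPES table, tests that stub the database no longer import nova.compute.instance_types at all. The XenAPI migration tests later in this patch use it roughly like this (the values mirror those tests; nothing here touches a real database):

    import stubout
    from nova import db
    from nova.tests.db import fakes as db_fakes

    stubs = stubout.StubOutForTesting()
    db_fakes.stub_out_db_instance_api(stubs)

    values = {'name': 1, 'id': 1, 'project_id': 'fake', 'user_id': 'fake',
              'image_id': 1, 'kernel_id': 2, 'ramdisk_id': 3,
              'instance_type': 'm1.tiny',
              'mac_address': 'aa:bb:cc:dd:ee:ff'}
    # 'm1.tiny' is resolved against the fake INSTANCE_TYPES dict above,
    # not against the real instance_types module.
    instance = db.instance_create(values)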
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index cfa65c137..cbd949477 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -29,8 +29,8 @@ FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
-FLAGS.network_size = 16
-FLAGS.num_networks = 5
+FLAGS.network_size = 8
+FLAGS.num_networks = 2
FLAGS.fake_network = True
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
@@ -39,6 +39,5 @@ FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
-FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.sqlite_db = "tests.sqlite"
FLAGS.use_ipv6 = True
-FLAGS.logfile = 'tests.log'
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..3ff8d7ce5 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,40 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
+ IMAGE_MACHINE = 1
+ IMAGE_KERNEL = 2
+ IMAGE_RAMDISK = 3
+ IMAGE_RAW = 4
+ IMAGE_VHD = 5
+
+ IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'type': 'machine'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'type': 'kernel'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'type': 'ramdisk'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'type': 'raw'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'type': 'vhd'},
+ 'image_data': StringIO.StringIO('')}}
+
def __init__(self, host, port=None, use_ssl=False):
pass
- def get_image(self, image):
- meta = {
- 'size': 0,
- }
- image_file = StringIO.StringIO('')
- return meta, image_file
+ def get_image_meta(self, image_id):
+ return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+ def get_image(self, image_id):
+ image = self.IMAGE_FIXTURES[image_id]
+ return image['image_meta'], image['image_data']
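FakeGlance now serves a small fixture table keyed by image id, so a test can ask for a specific kind of image by constant instead of a bare number. A short usage sketch (the host argument is a placeholder; the fixtures are the ones defined above):

    client = FakeGlance('localhost')

    meta = client.get_image_meta(FakeGlance.IMAGE_VHD)
    # meta['type'] == 'vhd'

    meta, data = client.get_image(FakeGlance.IMAGE_RAW)
    # meta['type'] == 'raw'; data is an empty StringIO payload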
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index da86e6e11..5a1be08eb 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -311,4 +311,5 @@ class S3APITestCase(test.TestCase):
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
+ super(S3APITestCase, self).tearDown()
return defer.DeferredList([stop_listening])
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 1824d24bc..b195fa520 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -66,6 +66,7 @@ class CloudTestCase(test.TestCase):
# set up services
self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
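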
self.network = self.start_service('network')
self.manager = manager.AuthManager()
@@ -73,8 +74,12 @@ class CloudTestCase(test.TestCase):
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
+ host = self.network.get_network_host(self.context.elevated())
def tearDown(self):
+ network_ref = db.project_get_network(self.context,
+ self.project.id)
+ db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.compute.kill()
@@ -131,6 +136,22 @@ class CloudTestCase(test.TestCase):
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
+ def test_describe_security_groups(self):
+ """Makes sure describe_security_groups works and filters results."""
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = db.volume_create(self.context, {})
@@ -201,27 +222,32 @@ class CloudTestCase(test.TestCase):
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
+ greenthread.sleep(0.3)
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
+ instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_ajax_console(self):
+ image_id = FLAGS.default_image
kwargs = {'image_id': image_id}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
- output = yield self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']),
- 'http://fakeajaxconsole.com/?token=FAKETOKEN')
+ greenthread.sleep(0.3)
+ output = self.cloud.get_ajax_console(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['url'],
+ '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
- rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_key_generation(self):
result = self._create_key('test')
@@ -241,7 +267,7 @@ class CloudTestCase(test.TestCase):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
- keys = result["keypairsSet"]
+ keys = result["keySet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
@@ -284,70 +310,6 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
- def test_describe_instances(self):
- """Makes sure describe_instances works."""
- instance1 = db.instance_create(self.context, {'host': 'host2'})
- comp1 = db.service_create(self.context, {'host': 'host2',
- 'availability_zone': 'zone1',
- 'topic': "compute"})
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(result['reservationSet'][0]
- ['instancesSet'][0]
- ['placement']['availabilityZone'], 'zone1')
- db.instance_destroy(self.context, instance1['id'])
- db.service_destroy(self.context, comp1['id'])
-
- def test_instance_update_state(self):
- # TODO(termie): what is this code even testing?
- def instance(num):
- return {
- 'reservation_id': 'r-1',
- 'instance_id': 'i-%s' % num,
- 'image_id': 'ami-%s' % num,
- 'private_dns_name': '10.0.0.%s' % num,
- 'dns_name': '10.0.0%s' % num,
- 'ami_launch_index': str(num),
- 'instance_type': 'fake',
- 'availability_zone': 'fake',
- 'key_name': None,
- 'kernel_id': 'fake',
- 'ramdisk_id': 'fake',
- 'groups': ['default'],
- 'product_codes': None,
- 'state': 0x01,
- 'user_data': ''}
- rv = self.cloud._format_describe_instances(self.context)
- logging.error(str(rv))
- self.assertEqual(len(rv['reservationSet']), 0)
-
- # simulate launch of 5 instances
- # self.cloud.instances['pending'] = {}
- #for i in xrange(5):
- # inst = instance(i)
- # self.cloud.instances['pending'][inst['instance_id']] = inst
-
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
- # report 4 nodes each having 1 of the instances
- #for i in xrange(4):
- # self.cloud.update_state('instances',
- # {('node-%s' % i): {('i-%s' % i):
- # instance(i)}})
-
- # one instance should be pending still
- #self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
-
- # check that the reservations collapse
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
-
- # check that we can get metadata for each instance
- #for i in xrange(4):
- # data = self.cloud.get_metadata(instance(i)['private_dns_name'])
- # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)
-
@staticmethod
def _fake_set_image_description(ctxt, image_id, description):
from nova.objectstore import handler
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index b049ac943..58493d7ac 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import instance_types
LOG = logging.getLogger('nova.tests.compute')
@@ -56,7 +57,7 @@ class ComputeTestCase(test.TestCase):
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
- def _create_instance(self):
+ def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
@@ -67,6 +68,7 @@ class ComputeTestCase(test.TestCase):
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst.update(params)
return db.instance_create(self.context, inst)['id']
def _create_group(self):
@@ -266,3 +268,31 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_resize_instance(self):
+ """Ensure instance can be migrated/resized"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id, {'host': 'foo'})
+ self.compute.prep_resize(context, instance_id)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+ self.compute.resize_instance(context, instance_id,
+ migration_ref['id'])
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_get_by_flavor_id(self):
+ type = instance_types.get_by_flavor_id(1)
+ self.assertEqual(type, 'm1.tiny')
+
+ def test_resize_same_source_fails(self):
+ """Ensure instance fails to migrate when source and destination are
+ the same host"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.assertRaises(exception.Error, self.compute.prep_resize,
+ self.context, instance_id)
+ self.compute.terminate_instance(self.context, instance_id)
+ type = instance_types.get_by_flavor_id("1")
+ self.assertEqual(type, 'm1.tiny')
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index 7656f5396..b6bfab534 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -52,6 +52,7 @@ class DirectTestCase(test.TestCase):
def tearDown(self):
direct.ROUTES = {}
+ super(DirectTestCase, self).tearDown()
def test_delegated_auth(self):
req = webob.Request.blank('/fake/context')
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
new file mode 100644
index 000000000..edc538879
--- /dev/null
+++ b/nova/tests/test_instance_types.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types code
+"""
+import time
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.compute import instance_types
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.compute')
+
+
+class InstanceTypeTestCase(test.TestCase):
+ """Test cases for instance type code"""
+ def setUp(self):
+ super(InstanceTypeTestCase, self).setUp()
+ session = get_session()
+ max_flavorid = session.query(models.InstanceTypes).\
+ order_by("flavorid desc").\
+ first()
+ self.flavorid = max_flavorid["flavorid"] + 1
+ self.name = str(int(time.time()))
+
+ def test_instance_type_create_then_delete(self):
+ """Ensure instance types can be created and then deleted"""
+ starting_inst_list = instance_types.get_all_types()
+ instance_types.create(self.name, 256, 1, 120, self.flavorid)
+ new = instance_types.get_all_types()
+ self.assertNotEqual(len(starting_inst_list),
+ len(new),
+ 'instance type was not created')
+ instance_types.destroy(self.name)
+ self.assertEqual(1,
+ instance_types.get_instance_type(self.name)["deleted"])
+ self.assertEqual(starting_inst_list, instance_types.get_all_types())
+ instance_types.purge(self.name)
+ self.assertEqual(len(starting_inst_list),
+ len(instance_types.get_all_types()),
+ 'instance type not purged')
+
+ def test_get_all_instance_types(self):
+ """Ensures that all instance types can be retrieved"""
+ session = get_session()
+ total_instance_types = session.query(models.InstanceTypes).\
+ count()
+ inst_types = instance_types.get_all_types()
+ self.assertEqual(total_instance_types, len(inst_types))
+
+ def test_invalid_create_args_should_fail(self):
+ """Ensures that instance type creation fails with invalid args"""
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 0, 1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, -1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, 1, "aa", self.flavorid)
+
+ def test_non_existant_inst_type_shouldnt_delete(self):
+ """Ensures that destroying a nonexistent instance type fails"""
+ self.assertRaises(exception.ApiError,
+ instance_types.destroy, "sfsfsdfdfs")
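The create-then-delete test above walks the full instance-type lifecycle. In outline, using only the calls the test itself makes (the name and flavorid below are placeholders for values not already taken):

    flavorid = 6                          # any unused flavorid
    instance_types.create('m1.test', 256, 1, 120, flavorid)
    #                      name  memory_mb vcpus local_gb
    instance_types.destroy('m1.test')     # soft delete: row kept, marked deleted
    # instance_types.get_instance_type('m1.test')['deleted'] == 1
    instance_types.purge('m1.test')       # hard delete: row removed entirely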
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 00f9323f3..ce1c77210 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -42,15 +42,13 @@ class NetworkTestCase(test.TestCase):
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_call=True,
- fake_network=True,
- network_size=16,
- num_networks=5)
+ fake_network=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.RequestContext(project=None, user=self.user)
- for i in range(5):
+ for i in range(FLAGS.num_networks):
name = 'project%s' % i
project = self.manager.create_project(name, 'netuser', name)
self.projects.append(project)
@@ -117,6 +115,9 @@ class NetworkTestCase(test.TestCase):
utils.to_global_ipv6(
network_ref['cidr_v6'],
instance_ref['mac_address']))
+ self._deallocate_address(0, address)
+ db.instance_destroy(context.get_admin_context(),
+ instance_ref['id'])
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
@@ -192,7 +193,7 @@ class NetworkTestCase(test.TestCase):
first = self._create_address(0)
lease_ip(first)
instance_ids = []
- for i in range(1, 5):
+ for i in range(1, FLAGS.num_networks):
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 9548a8c13..4ecb36b54 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import compute
from nova import context
from nova import db
from nova import flags
@@ -73,20 +74,43 @@ class QuotaTestCase(test.TestCase):
vol['size'] = size
return db.volume_create(self.context, vol)['id']
+ def _get_instance_type(self, name):
+ instance_types = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ return instance_types[name]
+
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
+
+ # metadata_items
+ too_many_items = FLAGS.quota_metadata_items + 1000
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
+ db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, 5)
+
+ # Cleanup
db.quota_destroy(self.context, self.project.id)
def test_too_many_instances(self):
@@ -151,3 +175,15 @@ class QuotaTestCase(test.TestCase):
self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_too_many_metadata_items(self):
+ metadata = {}
+ for i in range(FLAGS.quota_metadata_items + 1):
+ metadata['key%s' % i] = 'value%s' % i
+ self.assertRaises(quota.QuotaError, compute.API().create,
+ self.context,
+ min_count=1,
+ max_count=1,
+ instance_type='m1.small',
+ image_id='fake',
+ metadata=metadata)
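The metadata assertions above pin down a simple clamping rule: allowed_metadata_items never returns more than the effective metadata_items quota for the project. A rough, hypothetical sketch of that rule as the test expects it to behave (this is not the actual quota implementation):

    def allowed_metadata_items_sketch(requested, project_quota):
        # Clamp the request to the project's metadata_items quota.
        return min(requested, project_quota)

    assert allowed_metadata_items_sketch(1128, 128) == 128
    assert allowed_metadata_items_sketch(1005, 5) == 5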
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 250170072..b6888c4d2 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -150,6 +150,7 @@ class SimpleDriverTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
+ super(SimpleDriverTestCase, self).tearDown()
def _create_instance(self, **kwargs):
"""Create a test instance"""
@@ -270,6 +271,7 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id)
+ db.instance_destroy(self.context, instance_id)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index a67c8d1e8..45d9afa6c 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -50,13 +50,6 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services"""
- def test_attribute_error_for_no_manager(self):
- serv = service.Service('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- self.assertRaises(AttributeError, getattr, serv, 'test_method')
-
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
new file mode 100644
index 000000000..34a407f1a
--- /dev/null
+++ b/nova/tests/test_utils.py
@@ -0,0 +1,174 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import utils
+from nova import exception
+
+
+class GetFromPathTestCase(test.TestCase):
+ def test_tolerates_nones(self):
+ f = utils.get_from_path
+
+ input = []
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [None]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': None}]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': None}}]
+ self.assertEquals([{'b': None}], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ def test_does_select(self):
+ f = utils.get_from_path
+
+ input = [{'a': 'a_1'}]
+ self.assertEquals(['a_1'], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': 'b_1'}}]
+ self.assertEquals([{'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': {'c': 'c_2'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
+ f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
+ self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
+
+ self.assertEquals([], f(input, "a/b/c/d"))
+ self.assertEquals([], f(input, "c/a/b/d"))
+ self.assertEquals([], f(input, "i/r/t"))
+
+ def test_flattens_lists(self):
+ f = utils.get_from_path
+
+ input = [{'a': [1, 2, 3]}]
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}]
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [1, 2, {'b': 'b_1'}]}]
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+
+ def test_bad_xpath(self):
+ f = utils.get_from_path
+
+ self.assertRaises(exception.Error, f, [], None)
+ self.assertRaises(exception.Error, f, [], "")
+ self.assertRaises(exception.Error, f, [], "/")
+ self.assertRaises(exception.Error, f, [], "/a")
+ self.assertRaises(exception.Error, f, [], "/a/")
+ self.assertRaises(exception.Error, f, [], "//")
+ self.assertRaises(exception.Error, f, [], "//a")
+ self.assertRaises(exception.Error, f, [], "a//a")
+ self.assertRaises(exception.Error, f, [], "a//a/")
+ self.assertRaises(exception.Error, f, [], "a/a/")
+
+ def test_real_failure1(self):
+ # Real world failure case...
+ # We weren't coping when the input was a Dictionary instead of a List
+ # This led to test_accepts_dictionaries
+ f = utils.get_from_path
+
+ inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
+ 'address': '192.168.0.3'},
+ 'hostname': ''}
+
+ private_ips = f(inst, 'fixed_ip/address')
+ public_ips = f(inst, 'fixed_ip/floating_ips/address')
+ self.assertEquals(['192.168.0.3'], private_ips)
+ self.assertEquals(['1.2.3.4'], public_ips)
+
+ def test_accepts_dictionaries(self):
+ f = utils.get_from_path
+
+ input = {'a': [1, 2, 3]}
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': {'b': [1, 2, 3]}}
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [1, 2, {'b': 'b_1'}]}
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 6e5a0114b..f151ae911 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -204,11 +204,12 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
+ db.instance_destroy(user_context, instance_ref['id'])
def tearDown(self):
- super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(LibvirtConnTestCase, self).tearDown()
class IptablesFirewallTestCase(test.TestCase):
@@ -365,6 +366,7 @@ class IptablesFirewallTestCase(test.TestCase):
'--dports 80:81 -j ACCEPT' % security_group_chain \
in self.out_rules,
"TCP port 80/81 acceptance rule wasn't added")
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
class NWFilterTestCase(test.TestCase):
@@ -388,6 +390,7 @@ class NWFilterTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(NWFilterTestCase, self).tearDown()
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
@@ -514,3 +517,4 @@ class NWFilterTestCase(test.TestCase):
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 6b8efc9d8..7f437c2b8 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
@@ -167,6 +168,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
+ stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
@@ -231,7 +233,7 @@ class XenAPIVMTestCase(test.TestCase):
vm = vms[0]
# Check that m1.large above turned into the right thing.
- instance_type = instance_types.INSTANCE_TYPES['m1.large']
+ instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
@@ -283,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+
+ def test_spawn_vhd_glance(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, 2, 3)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
@@ -336,3 +344,101 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
+
+
+class XenAPIMigrateInstance(test.TestCase):
+ """
+ Unit test for verifying migration-related actions
+ """
+
+ def setUp(self):
+ super(XenAPIMigrateInstance, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ FLAGS.target_host = '127.0.0.1'
+ FLAGS.xenapi_connection_url = 'test_url'
+ FLAGS.xenapi_connection_password = 'test_pass'
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.stub_out_get_target(self.stubs)
+ xenapi_fake.reset()
+ self.values = {'name': 1, 'id': 1,
+ 'project_id': 'fake',
+ 'user_id': 'fake',
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ stubs.stub_out_migration_methods(self.stubs)
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ conn = xenapi_conn.get_connection(False)
+ conn.migrate_disk_and_power_off(instance, '127.0.0.1')
+
+ def test_attach_disk(self):
+ instance = db.instance_create(self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ conn = xenapi_conn.get_connection(False)
+ conn.attach_disk(instance, {'base_copy': 'hurr', 'cow': 'durr'})
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+ """
+ Unit tests for code that detects the ImageType
+ """
+ def setUp(self):
+ super(XenAPIDetermineDiskImageTestCase, self).setUp()
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ class FakeInstance(object):
+ pass
+
+ self.fake_instance = FakeInstance()
+ self.fake_instance.id = 42
+
+ def assert_disk_type(self, disk_type):
+ dt = vm_utils.VMHelper.determine_disk_image_type(
+ self.fake_instance)
+ self.assertEqual(disk_type, dt)
+
+ def test_instance_disk(self):
+ """
+ If a kernel is specified then the image type is DISK (aka machine)
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+ self.assert_disk_type(vm_utils.ImageType.DISK)
+
+ def test_instance_disk_raw(self):
+ """
+ If the kernel isn't specified, and we're not using Glance, then
+ DISK_RAW is assumed.
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_raw(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'raw'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_vhd(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'vhd'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 624995ada..11e89c9b4 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -20,6 +20,7 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
def stubout_instance_snapshot(stubs):
@@ -27,7 +28,7 @@ def stubout_instance_snapshot(stubs):
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
- def fake_wait_for_task(self, id, task):
+ def fake_wait_for_task(self, task, id):
class FakeEvent:
def send(self, value):
@@ -130,6 +131,12 @@ def stubout_stream_disk(stubs):
stubs.Set(vm_utils, '_stream_disk', f)
+def stubout_is_vdi_pv(stubs):
+ def f(_1):
+ return False
+ stubs.Set(vm_utils, '_is_vdi_pv', f)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
@@ -171,6 +178,12 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+ def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
+ pass
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
@@ -205,3 +218,44 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
def SR_forget(self, _1, ref):
pass
+
+
+class FakeSessionForMigrationTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Migration tests"""
+ def __init__(self, uri):
+ super(FakeSessionForMigrationTests, self).__init__(uri)
+
+
+def stub_out_migration_methods(stubs):
+ def fake_get_snapshot(self, instance):
+ return 'foo', 'bar'
+
+ @classmethod
+ def fake_get_vdi(cls, session, vm_ref):
+ vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
+ sr_ref='herp', sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, {'uuid': vdi_rec['uuid'], }
+
+ def fake_shutdown(self, inst, vm, method='clean'):
+ pass
+
+ @classmethod
+ def fake_sr(cls, session, *args):
+ pass
+
+ @classmethod
+ def fake_get_sr_path(cls, *args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
+ stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
+ stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
+ stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
diff --git a/nova/utils.py b/nova/utils.py
index 2a3acf042..0cf91e0cc 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +32,7 @@ import string
import struct
import sys
import time
+import types
from xml.sax import saxutils
import re
import netaddr
@@ -499,3 +501,52 @@ def ensure_b64_encoding(val):
return val
except TypeError:
return base64.b64encode(val)
+
+
+def get_from_path(items, path):
+ """Returns a list of items matching the specified path. Takes an
+ XPath-like expression, e.g. prop1/prop2/prop3, and for each item in
+ items looks up items[prop1][prop2][prop3]. Like XPath, if any of the
+ intermediate results are lists, each list item is treated individually.
+ A None in items or in any child expression is ignored; this function
+ never raises because of a None anywhere in items, and the returned
+ list contains no None values."""
+
+ if path is None:
+ raise exception.Error("Invalid mini_xpath")
+
+ (first_token, sep, remainder) = path.partition("/")
+
+ if first_token == "":
+ raise exception.Error("Invalid mini_xpath")
+
+ results = []
+
+ if items is None:
+ return results
+
+ if not isinstance(items, types.ListType):
+ # Wrap single objects in a list
+ items = [items]
+
+ for item in items:
+ if item is None:
+ continue
+ get_method = getattr(item, "get", None)
+ if get_method is None:
+ continue
+ child = get_method(first_token)
+ if child is None:
+ continue
+ if isinstance(child, types.ListType):
+ # Flatten intermediate lists
+ for x in child:
+ results.append(x)
+ else:
+ results.append(child)
+
+ if not sep:
+ # No more tokens
+ return results
+ else:
+ return get_from_path(results, remainder)
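For reference, a few calls showing how the new helper behaves; these mirror the cases covered by nova/tests/test_utils.py earlier in this patch:

    inst = {'fixed_ip': {'address': '192.168.0.3',
                         'floating_ips': [{'address': '1.2.3.4'},
                                          {'address': '5.6.7.8'}]}}

    utils.get_from_path(inst, 'fixed_ip/address')
    # -> ['192.168.0.3']
    utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
    # -> ['1.2.3.4', '5.6.7.8']
    utils.get_from_path([inst, None, {'fixed_ip': None}], 'fixed_ip/address')
    # -> ['192.168.0.3']    # Nones are skipped rather than raised on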
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index cb639a102..2bded07a4 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -40,6 +40,8 @@ flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
flags.DEFINE_integer('timeout_nbd', 10,
'time to wait for a NBD device coming up')
+flags.DEFINE_integer('max_nbd_devices', 16,
+ 'maximum number of possible nbd devices')
def extend(image, size):
@@ -141,7 +143,7 @@ def _unlink_device(device, nbd):
utils.execute('sudo losetup --detach %s' % device)
-_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)]
+_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
def _allocate_device():
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 92749f38a..c744acf91 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -139,6 +139,24 @@ class FakeConnection(object):
"""
pass
+ def get_host_ip_addr(self):
+ """
+ Retrieves the IP address of the dom0
+ """
+ pass
+
+ def resize(self, instance, flavor):
+ """
+ Resizes/Migrates the specified instance.
+
+ The flavor parameter determines whether or not the instance RAM and
+ disk space are modified, and if so, to what size.
+
+ The work will be done asynchronously. This function returns a task
+ that allows the caller to detect when it is complete.
+ """
+ pass
+
def set_admin_password(self, instance, new_pass):
"""
Set the root password on the specified instance.
@@ -179,6 +197,19 @@ class FakeConnection(object):
"""
pass
+ def migrate_disk_and_power_off(self, instance, dest):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ pass
+
+ def attach_disk(self, instance, disk_info):
+ """
+ Attaches the disk to an instance given the metadata disk_info
+ """
+ pass
+
def pause(self, instance, callback):
"""
Pause the specified instance.
@@ -319,7 +350,9 @@ class FakeConnection(object):
return 'FAKE CONSOLE OUTPUT'
def get_ajax_console(self, instance):
- return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+ return {'token': 'FAKETOKEN',
+ 'host': 'fakeajaxconsole.com',
+ 'port': 6969}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 4e0fd106f..9f7315c17 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -55,6 +55,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+#from nova import test
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -362,7 +363,7 @@ class LibvirtConnection(object):
raise exception.APIError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -392,7 +393,7 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
@@ -606,7 +607,7 @@ class LibvirtConnection(object):
user=user,
project=project,
size=size)
- type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
+ type_data = instance_types.get_instance_type(inst['instance_type'])
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
@@ -667,7 +668,8 @@ class LibvirtConnection(object):
instance['id'])
# FIXME(vish): stick this in db
instance_type = instance['instance_type']
- instance_type = instance_types.INSTANCE_TYPES[instance_type]
+ # instance_type = test.INSTANCE_TYPES[instance_type]
+ instance_type = instance_types.get_instance_type(instance_type)
ip_address = db.instance_get_fixed_address(context.get_admin_context(),
instance['id'])
# Assume that the gateway also acts as the dhcp server.
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 018d0dcd3..ba12d4d3a 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -290,6 +290,9 @@ class SessionBase(object):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
+ def host_call_plugin(*args):
+ return 'herp'
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -401,7 +404,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- LOG.debuug(_('Raising NotImplemented'))
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 80cc3035d..80b7540d4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -24,6 +24,7 @@ import pickle
import re
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
@@ -63,11 +64,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
@@ -82,7 +86,8 @@ class VMHelper(HelperBase):
the pv_kernel flag indicates whether the guest is HVM or PV
"""
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+        instance_type = instance_types.get_instance_type(
+            instance.instance_type)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@@ -140,7 +145,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.get_instance_type(
+ instance.instance_type)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -201,19 +207,17 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = dev
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
@@ -249,24 +253,40 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def get_vdi_for_vm_safely(cls, session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(
+ _("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
- """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD
- """
+ """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
- vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
- template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vm_ref = session.wait_for_task(task, instance_id)
+ template_vdi_rec = cls.get_vdi_for_vm_safely(session,
+ template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
@@ -276,29 +296,53 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
+
+ @classmethod
+ def get_sr(cls, session, sr_label='slices'):
+ """Finds the SR named by the given name label and returns
+ the UUID"""
+ return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
+
+ @classmethod
+ def get_sr_path(cls, session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+ # NOTE(sirp): Currently we only support uploading images as VHD, there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': cls.get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
- session.wait_for_task(instance_id, task)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
+ session.wait_for_task(task, instance_id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,35 +352,80 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
+
+ @classmethod
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': cls.get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(task, instance_id)
+
+ cls.scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
+
+ """
+        # FIXME(sirp): Since the Glance plugin seems to be required for the
+        # VHD disk, it may be worth using the plugin for VHD, RAW and DISK
+        # restores alike
+ sr_ref = safe_find_sr(session)
- meta, image_file = c.get_image(image)
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
@@ -346,7 +435,7 @@ class VMHelper(HelperBase):
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
@@ -355,27 +444,97 @@ class VMHelper(HelperBase):
return session.get_xenapi().VDI.get_uuid(vdi)
@classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image_type field to
+ determine the image_type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_type2nova_type = {'machine': ImageType.DISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD,
+ 'kernel': ImageType.KERNEL_RAMDISK,
+ 'ramdisk': ImageType.KERNEL_RAMDISK}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ type_ = meta['type']
+ try:
+ return glance_type2nova_type[type_]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized image type '%(type_)s'") % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
+
+ @classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
def lookup_image(cls, session, instance_id, vdi_ref):
+ """
+ Determine if VDI is using a PV kernel
+ """
if FLAGS.xenapi_image_service == 'glance':
return cls._lookup_image_glance(session, vdi_ref)
else:
@@ -388,7 +547,7 @@ class VMHelper(HelperBase):
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -400,19 +559,7 @@ class VMHelper(HelperBase):
@classmethod
def _lookup_image_glance(cls, session, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
-
- def is_vdi_pv(dev):
- LOG.debug(_("Running pygrub against %s"), dev)
- output = os.popen('pygrub -qn /dev/%s' % dev)
- for line in output.readlines():
- #try to find kernel string
- m = re.search('(?<=kernel:)/.*(?:>)', line)
- if m and m.group(0).find('xen') != -1:
- LOG.debug(_("Found Xen kernel %s") % m.group(0))
- return True
- LOG.debug(_("No Xen kernel found. Booting HVM."))
- return False
- return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+ return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
@classmethod
def lookup(cls, session, i):
@@ -496,6 +643,21 @@ class VMHelper(HelperBase):
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
+ @classmethod
+ def scan_sr(cls, session, instance_id=None, sr_ref=None):
+ """Scans the SR specified by sr_ref"""
+ if sr_ref:
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(task, instance_id)
+
+ @classmethod
+ def scan_default_sr(cls, session):
+ """Looks for the system default SR and triggers a re-scan"""
+        #FIXME(sirp/mdietz): refactor this to reuse scan_sr above
+ sr_ref = cls.get_sr(session)
+ session.call_xenapi('SR.scan', sr_ref)
+
def get_rrd(host, uuid):
"""Return the VM RRD XML as a string"""
@@ -538,12 +700,6 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
-def scan_sr(session, instance_id, sr_ref):
- LOG.debug(_("Re-scanning SR %s"), sr_ref)
- task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
-
-
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
@@ -568,7 +724,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
- scan_sr(session, instance_id, sr_ref)
+ VMHelper.scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
@@ -599,7 +755,18 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
srs = session.get_xenapi().SR.get_all()
for sr in srs:
@@ -714,9 +881,22 @@ def get_this_vm_ref(session):
return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
-def _stream_disk(dev, type, virtual_size, image_file):
+def _is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ #try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+
+
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -745,3 +925,8 @@ def _write_partition(virtual_size, dev):
(dest, primary_first, primary_last))
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
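determine_disk_image_type() above boils down to two rules: trust Glance's image type when Glance is the image service, otherwise infer DISK vs DISK_RAW from the presence of a kernel_id. A condensed, self-contained restatement of that decision (stand-in constants, not the real ImageType class):

# Condensed restatement of the image-type rules documented above.
KERNEL_RAMDISK, DISK, DISK_RAW, DISK_VHD = range(4)

GLANCE_TYPE_TO_IMAGE_TYPE = {'machine': DISK,
                             'raw': DISK_RAW,
                             'vhd': DISK_VHD,
                             'kernel': KERNEL_RAMDISK,
                             'ramdisk': KERNEL_RAMDISK}

def image_type_for(glance_type=None, kernel_id=None):
    if glance_type is not None:
        # Rule 1: Glance is authoritative about the disk format
        return GLANCE_TYPE_TO_IMAGE_TYPE[glance_type]
    # Rule 2: no Glance metadata, so deduce from whether a kernel is attached
    return DISK if kernel_id else DISK_RAW

assert image_type_for(glance_type='vhd') == DISK_VHD
assert image_type_for(kernel_id='aki-123') == DISK
assert image_type_for() == DISK_RAW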
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 0168681f6..b862c9de9 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -22,6 +22,7 @@ Management class for VM-related functions (spawn, reboot, etc).
import json
import M2Crypto
import os
+import pickle
import subprocess
import tempfile
import uuid
@@ -49,6 +50,7 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
@@ -60,96 +62,80 @@ class VMOps(object):
vms.append(rec["name_label"])
return vms
- def spawn(self, instance):
+ def _start(self, instance, vm_ref=None):
+ """Power on a VM instance"""
+ if not vm_ref:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ if vm_ref is None:
+            raise exception.NotFound(_('Attempted to power on non-existent'
+                ' instance; bad instance id %s') % instance.id)
+ LOG.debug(_("Starting instance %s"), instance.name)
+ self._session.call_xenapi('VM.start', vm_ref, False, False)
+
+ def spawn(self, instance, disk):
"""Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
+        vdi_ref = kernel = ramdisk = pv_kernel = disk_image_type = None
+
+ # Are we building from a pre-existing disk?
+ if not disk:
+ #if kernel is not present we must download a raw disk
+
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
+ instance.image_id, user, project, disk_image_type)
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+
else:
- disk_image_type = ImageType.DISK_RAW
- vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
- instance.image_id, user, project, disk_image_type)
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
- pv_kernel = False
- if not instance.kernel_id:
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk)
+
+ if disk_image_type == ImageType.DISK_RAW:
+ # Have a look at the VDI and see if it has a PV kernel
pv_kernel = VMHelper.lookup_image(self._session, instance.id,
vdi_ref)
- kernel = None
+ elif disk_image_type == ImageType.DISK_VHD:
+ # TODO(sirp): Assuming PV for now; this will need to be
+ # configurable as Windows will use HVM.
+ pv_kernel = True
+
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
- ramdisk = None
+
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
-
- # write network info
- admin_context = context.get_admin_context()
-
- # TODO(tr3buchet) - remove comment in multi-nic
- # I've decided to go ahead and consider multiple IPs and networks
- # at this stage even though they aren't implemented because these will
- # be needed for multi-nic and there was no sense writing it for single
- # network/single IP and then having to turn around and re-write it
- IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
- for network in db.network_get_all_by_instance(admin_context,
- instance['id']):
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
-
- def ip_dict(ip):
- return {'netmask': network['netmask'],
- 'enabled': '1',
- 'ip': ip.address}
-
- mac_id = instance.mac_address.replace(':', '')
- location = 'vm-data/networking/%s' % mac_id
- mapping = {'label': network['label'],
- 'gateway': network['gateway'],
- 'mac': instance.mac_address,
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
- self.write_to_param_xenstore(vm_ref, {location: mapping})
-
- # TODO(tr3buchet) - remove comment in multi-nic
- # this bit here about creating the vifs will be updated
- # in multi-nic to handle multiple IPs on the same network
- # and multiple networks
- # for now it works as there is only one of each
- bridge = network['bridge']
- network_ref = \
- NetworkHelper.find_network_with_bridge(self._session, bridge)
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=vdi_ref, userdevice=0, bootable=True)
- if network_ref:
- VMHelper.create_vif(self._session, vm_ref,
- network_ref, instance.mac_address)
+ # inject_network_info and create vifs
+ networks = self.inject_network_info(instance)
+ self.create_vifs(instance, networks)
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
- instance_name = instance.name
+ self._start(instance, vm_ref)
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
+ % locals())
def _inject_onset_files():
onset_files = instance.onset_files
@@ -173,18 +159,18 @@ class VMOps(object):
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_onset_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -193,7 +179,7 @@ class VMOps(object):
timer.f = _wait_for_boot
- # call reset networking
+ # call to reset network to configure network from xenstore
self.reset_network(instance)
return timer.start(interval=0.5, now=True)
@@ -232,8 +218,22 @@ class VMOps(object):
_('Instance not present %s') % instance_name)
return vm
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting"""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot"""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
+
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance
+ """Create snapshot from a running VM instance
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -254,7 +254,20 @@ class VMOps(object):
that will bundle the VHDs together and then push the bundle into
Glance.
"""
+ template_vm_ref = None
+ try:
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance.id, template_vdi_uuids, image_id)
+ finally:
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
+
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ def _get_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
@@ -265,25 +278,95 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
+ return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
% locals())
return
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Copies a VHD from one host machine to another
+
+ :param instance: the instance that owns the VHD in question
+ :param dest: the destination host machine
+ :param disk_type: values are 'primary' or 'cow'
+ """
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ # The primary VDI becomes the COW after the snapshot, and we can
+ # identify it via the VBD. The base copy is the parent_uuid returned
+ # from the snapshot creation
+
+ base_copy_uuid = cow_uuid = None
+ template_vdi_uuids = template_vm_ref = None
try:
- # call plugin to ship snapshot off to glance
- VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ # transfer the base copy
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ base_copy_uuid = template_vdi_uuids[1]
+ vdi_ref, vm_vdi_rec = \
+ VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+ cow_uuid = vm_vdi_rec['uuid']
+
+ params = {'host': dest,
+ 'vdi_uuid': base_copy_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session)}
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now power down the instance and transfer the COW VHD
+            self._shutdown(instance, vm_ref, hard=False)
+
+ params = {'host': dest,
+ 'vdi_uuid': cow_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
- logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ # TODO(mdietz): we could also consider renaming these to something
+ # sensible so we don't need to blindly pass around dictionaries
+ return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
+ def attach_disk(self, instance, disk_info):
+ """Links the base copy VHD to the COW via the XAPI plugin"""
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ new_base_copy_uuid = str(uuid.uuid4())
+ new_cow_uuid = str(uuid.uuid4())
+ params = {'instance_id': instance.id,
+ 'old_base_copy_uuid': disk_info['base_copy'],
+ 'old_cow_uuid': disk_info['cow'],
+ 'new_base_copy_uuid': new_base_copy_uuid,
+ 'new_cow_uuid': new_cow_uuid,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration',
+ 'move_vhds_into_sr', {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now we rescan the SR so we find the VHDs
+ VMHelper.scan_default_sr(self._session)
+
+ return new_cow_uuid
+
+ def resize(self, instance, flavor):
+ """Resize a running instance by changing it's RAM and disk size """
+ raise NotImplementedError()
def reboot(self, instance):
"""Reboot VM instance"""
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
@@ -349,22 +432,32 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def _shutdown(self, instance, vm, hard=True):
+ """Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
+ else:
+ task = self._session.call_xenapi('Async.VM.clean_shutdown', vm)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
def _destroy_vdis(self, instance, vm):
"""Destroys all VDIs associated with a VM """
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
if not vdis:
@@ -373,33 +466,60 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _destroy_kernel_ramdisk(self, instance, vm):
+ """
+ Three situations can occur:
+
+ 1. We have neither a ramdisk nor a kernel, in which case we are a
+ RAW image and can omit this step
+
+ 2. We have one or the other, in which case, we should flag as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+ "both" % locals()))
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task, instance.id)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
def destroy(self, instance):
"""
Destroy VM instance
@@ -407,32 +527,37 @@ class VMOps(object):
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
+ def _destroy(self, instance, vm, shutdown=True,
+ destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying kernel and ramdisk files (if necessary)
+ 4. Destroying that actual VM record
"""
if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
self._shutdown(instance, vm)
self._destroy_vdis(instance, vm)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm)
self._destroy_vm(instance, vm)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
@@ -461,6 +586,78 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance
+ - shutdown the instance VM
+ - set 'bootlock' to prevent the instance from starting in rescue
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue)
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+ if rescue_vm:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s" % instance.name))
+
+ vm = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm)
+ self._acquire_bootlock(vm)
+
+ instance._rescue = True
+        self.spawn(instance, None)
+ rescue_vm = self._get_vm_opaque_ref(instance)
+
+ vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
+ vbd_ref = VMHelper.create_vbd(
+ self._session,
+ rescue_vm,
+ vdi_ref,
+ 1,
+ False)
+
+ self._session.call_xenapi("Async.VBD.plug", vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance
+ - unplug the instance VM's disk from the rescue VM
+ - teardown the rescue VM
+ - release the bootlock to allow the instance VM to start
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+
+ if not rescue_vm:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s" % instance.name))
+
+ original_vm = self._get_vm_opaque_ref(instance)
+ vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+
+ instance._rescue = False
+
+ for vbd_ref in vbds:
+ vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd["userdevice"] == "1":
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+ self._session.wait_for_task(task1, instance.id)
+
+ vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
+ for vdi in vdis:
+ try:
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ self._session.wait_for_task(task, instance.id)
+ except self.XenAPI.Failure:
+ continue
+
+ task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+ self._session.wait_for_task(task2, instance.id)
+
+ self._release_bootlock(original_vm)
+ self._start(instance, original_vm)
+
def get_info(self, instance):
"""Return data about VM instance"""
vm = self._get_vm_opaque_ref(instance)
@@ -483,6 +680,94 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def inject_network_info(self, instance):
+ """
+ Generate the network info and make calls to place it into the
+ xenstore and the xenstore param list
+
+ """
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # I've decided to go ahead and consider multiple IPs and networks
+ # at this stage even though they aren't implemented because these will
+ # be needed for multi-nic and there was no sense writing it for single
+ # network/single IP and then having to turn around and re-write it
+ vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+ logging.debug(_("injecting network info to xenstore for vm: |%s|"),
+ vm_opaque_ref)
+ admin_context = context.get_admin_context()
+ IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ for network in networks:
+ network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+ def ip_dict(ip):
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ return {
+ "ip": ip6.addressV6,
+ "netmask": ip6.netmaskV6,
+ "gateway": ip6.gatewayV6,
+ "enabled": "1"}
+
+ mac_id = instance.mac_address.replace(':', '')
+ location = 'vm-data/networking/%s' % mac_id
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs],
+ 'ip6s': [ip6_dict(ip) for ip in network_IPs]}
+
+ self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+
+ try:
+                self.write_to_xenstore(vm_opaque_ref, location, mapping)
+ except KeyError:
+ # catch KeyError for domid if instance isn't running
+ pass
+
+ return networks
+
+ def create_vifs(self, instance, networks=None):
+ """
+ Creates vifs for an instance
+
+ """
+ vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+ logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
+        if networks is None:
+            admin_context = context.get_admin_context()
+            networks = db.network_get_all_by_instance(admin_context,
+                                                      instance['id'])
+ # TODO(tr3buchet) - remove comment in multi-nic
+ # this bit here about creating the vifs will be updated
+ # in multi-nic to handle multiple IPs on the same network
+ # and multiple networks
+ # for now it works as there is only one of each
+ for network in networks:
+ bridge = network['bridge']
+ network_ref = \
+ NetworkHelper.find_network_with_bridge(self._session, bridge)
+
+ if network_ref:
+ try:
+ device = "1" if instance._rescue else "0"
+ except AttributeError:
+ device = "0"
+
+ VMHelper.create_vif(
+ self._session,
+ vm_opaque_ref,
+ network_ref,
+ instance.mac_address,
+ device)
+
def reset_network(self, instance):
"""
Creates uuid arg to pass to make_agent_call and calls it.
@@ -551,7 +836,7 @@ class VMOps(object):
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
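migrate_disk_and_power_off() and attach_disk() above are the two halves of the resize/migration disk handshake: the source host returns the base-copy and COW VDI UUIDs, and the destination feeds that same dict back in before rescanning its SR. A toy sketch of the data flow (the classes below are stand-ins, not the real VMOps):

# Toy stand-ins illustrating what flows between the two hosts during a
# resize/migration; the real logic lives in VMOps above.
class FakeSourceVMOps(object):
    def migrate_disk_and_power_off(self, instance, dest):
        # snapshot, ship the base copy, power off, ship the COW...
        return {'base_copy': 'base-copy-vdi-uuid', 'cow': 'cow-vdi-uuid'}

class FakeDestVMOps(object):
    def attach_disk(self, instance, disk_info):
        # move disk_info['base_copy'] and disk_info['cow'] into the local SR,
        # rescan, and hand back the UUID of the new COW to boot from
        return 'new-cow-vdi-uuid'

disk_info = FakeSourceVMOps().migrate_disk_and_power_off('instance-1', 'host2')
new_cow_uuid = FakeDestVMOps().attach_disk('instance-1', disk_info)
assert new_cow_uuid == 'new-cow-vdi-uuid'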
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c2f65699f..62e17e851 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -152,14 +154,18 @@ class XenAPIConnection(object):
"""List VM instances"""
return self._vmops.list_instances()
- def spawn(self, instance):
+ def spawn(self, instance, disk=None):
"""Create VM instance"""
- self._vmops.spawn(instance)
+ self._vmops.spawn(instance, disk)
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id)
+ def resize(self, instance, flavor):
+ """Resize a VM instance"""
+ raise NotImplementedError()
+
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -186,6 +192,15 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance copies over the COW disk"""
+ return self._vmops.migrate_disk_and_power_off(instance, dest)
+
+ def attach_disk(self, instance, disk_info):
+ """Moves the copied VDIs into the SR"""
+ return self._vmops.attach_disk(instance, disk_info)
+
def suspend(self, instance, callback):
"""suspend the specified instance"""
self._vmops.suspend(instance, callback)
@@ -194,10 +209,22 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ self._vmops.inject_network_info(instance)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
@@ -214,6 +241,10 @@ class XenAPIConnection(object):
"""Return link to instance's ajax console"""
return self._vmops.get_ajax_console(instance)
+ def get_host_ip_addr(self):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return xs_url.netloc
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
@@ -273,7 +304,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -300,10 +331,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -317,7 +349,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())
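A recurring change throughout this patch is the flipped wait_for_task() signature: the task now comes first and the instance id is optional, so callers with no instance context can simply omit it (and no instance_action row is recorded). A stand-in showing just the calling convention:

# Stand-in session showing the new wait_for_task(task, id=None) convention;
# the real method polls the task and records an instance action when id is set.
class FakeSession(object):
    def wait_for_task(self, task, id=None):
        if id is not None:
            pass  # the real code builds an instance_action record here
        return 'result-of-%s' % task

session = FakeSession()
session.wait_for_task('OpaqueRef:some-task', 42)   # task first, then id
session.wait_for_task('OpaqueRef:some-task')       # id can now be omitted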
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 82f4c2f54..e3744c790 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -21,6 +21,7 @@ Drivers for volumes.
"""
import time
+import os
from nova import exception
from nova import flags
@@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('num_shell_tries', 3,
'number of times to attempt to run flakey shell commands')
+flags.DEFINE_string('num_iscsi_scan_tries', 3,
+ 'number of times to rescan iSCSI target to find volume')
flags.DEFINE_integer('num_shelves',
100,
'Number of vblade shelves')
@@ -88,7 +91,8 @@ class VolumeDriver(object):
% FLAGS.volume_group)
def create_volume(self, volume):
- """Creates a logical volume."""
+ """Creates a logical volume. Can optionally return a Dictionary of
+ changes to the volume object to be persisted."""
if int(volume['size']) == 0:
sizestr = '100M'
else:
@@ -123,7 +127,8 @@ class VolumeDriver(object):
raise NotImplementedError()
def create_export(self, context, volume):
- """Exports the volume."""
+ """Exports the volume. Can optionally return a Dictionary of changes
+ to the volume object to be persisted."""
raise NotImplementedError()
def remove_export(self, context, volume):
@@ -222,7 +227,18 @@ class FakeAOEDriver(AOEDriver):
class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes."""
+ """Executes commands relating to ISCSI volumes.
+
+ We make use of model provider properties as follows:
+
+ :provider_location: if present, contains the iSCSI target information
+                        in the same format as iscsiadm discovery output,
+                        i.e. '<ip>:<port>,<portal> <target IQN>'
+
+ :provider_auth: if present, contains a space-separated triple:
+ '<auth method> <auth username> <auth password>'.
+ `CHAP` is the only auth_method in use at the moment.
+ """
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
@@ -294,40 +310,149 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
+ def _do_iscsi_discovery(self, volume):
+ #TODO(justinsb): Deprecate discovery and use stored info
+ #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
+ LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+
volume_name = volume['name']
- host = volume['host']
+
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
+ "sendtargets -p %s" % (volume['host']))
for target in out.splitlines():
if FLAGS.iscsi_ip_prefix in target and volume_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- iscsi_portal = location.split(",")[0]
- return (iscsi_name, iscsi_portal)
+ return target
+ return None
+
+ def _get_iscsi_properties(self, volume):
+ """Gets iscsi configuration
+
+ We ideally get saved information in the volume entity, but fall back
+        to discovery if need be. Discovery may be completely removed in the
+        future. The properties are:
+
+ :target_discovered: boolean indicating whether discovery was used
+
+ :target_iqn: the IQN of the iSCSI target
+
+ :target_portal: the portal of the iSCSI target
+
+ :auth_method:, :auth_username:, :auth_password:
+
+ the authentication details. Right now, either auth_method is not
+ present meaning no authentication, or auth_method == `CHAP`
+ meaning use CHAP with the specified credentials.
+ """
+
+ properties = {}
+
+ location = volume['provider_location']
+
+ if location:
+ # provider_location is the same format as iSCSI discovery output
+ properties['target_discovered'] = False
+ else:
+ location = self._do_iscsi_discovery(volume)
+
+ if not location:
+ raise exception.Error(_("Could not find iSCSI export "
+ " for volume %s") %
+ (volume['name']))
+
+ LOG.debug(_("ISCSI Discovery: Found %s") % (location))
+ properties['target_discovered'] = True
+
+ (iscsi_target, _sep, iscsi_name) = location.partition(" ")
+
+ iscsi_portal = iscsi_target.split(",")[0]
+
+ properties['target_iqn'] = iscsi_name
+ properties['target_portal'] = iscsi_portal
+
+ auth = volume['provider_auth']
+
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+
+ return properties
+
+ def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+ command = ("sudo iscsiadm -m node -T %s -p %s %s" %
+ (iscsi_properties['target_iqn'],
+ iscsi_properties['target_portal'],
+ iscsi_command))
+ (out, err) = self._execute(command)
+ LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+ (iscsi_command, out, err))
+ return (out, err)
+
+ def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+ iscsi_command = ("--op update -n %s -v %s" %
+ (property_key, property_value))
+ return self._run_iscsiadm(iscsi_properties, iscsi_command)
def discover_volume(self, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v automatic" %
- (iscsi_name, iscsi_portal))
- return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
- iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+
+ if not iscsi_properties['target_discovered']:
+ self._run_iscsiadm(iscsi_properties, "--op new")
+
+ if iscsi_properties.get('auth_method'):
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.authmethod",
+ iscsi_properties['auth_method'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.username",
+ iscsi_properties['auth_username'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.password",
+ iscsi_properties['auth_password'])
+
+ self._run_iscsiadm(iscsi_properties, "--login")
+
+ self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+
+ mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
+ (iscsi_properties['target_portal'],
+ iscsi_properties['target_iqn']))
+
+ # The /dev/disk/by-path/... node is not always present immediately
+ # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
+ tries = 0
+ while not os.path.exists(mount_device):
+ if tries >= FLAGS.num_iscsi_scan_tries:
+ raise exception.Error(_("iSCSI device not found at %s") %
+ (mount_device))
+
+ LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+ "Will rescan & retry. Try number: %(tries)s") %
+ locals())
+
+ # The rescan isn't documented as being necessary(?), but it helps
+ self._run_iscsiadm(iscsi_properties, "--rescan")
+
+ tries = tries + 1
+ if not os.path.exists(mount_device):
+ time.sleep(tries ** 2)
+
+ if tries != 0:
+ LOG.debug(_("Found iSCSI node %(mount_device)s "
+ "(after %(tries)s rescans)") %
+ locals())
+
+ return mount_device
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v manual" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node --op delete "
- "--targetname %s" % iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+ self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
+ self._run_iscsiadm(iscsi_properties, "--logout")
+ self._run_iscsiadm(iscsi_properties, "--op delete")
class FakeISCSIDriver(ISCSIDriver):
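The new ISCSIDriver docstring pins down the stored formats for provider_location and provider_auth, and _get_iscsi_properties() parses them. A small self-contained illustration of that parsing, using made-up values in the documented formats:

# Illustrative parsing of the provider_* fields in the formats described above.
provider_location = '10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001'
provider_auth = 'CHAP chapuser chapsecret'

target, _sep, target_iqn = provider_location.partition(' ')
target_portal = target.split(',')[0]          # -> '10.0.0.5:3260'
auth_method, auth_username, auth_password = provider_auth.split()

assert target_portal == '10.0.0.5:3260'
assert target_iqn.startswith('iqn.')
assert auth_method == 'CHAP'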
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index d2f02e4e0..3e8bc16b3 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -107,10 +107,14 @@ class VolumeManager(manager.Manager):
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
" size %(vol_size)sG") % locals())
- self.driver.create_volume(volume_ref)
+ model_update = self.driver.create_volume(volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- self.driver.create_export(context, volume_ref)
+ model_update = self.driver.create_export(context, volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
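The manager change above establishes a simple contract: create_volume() and create_export() may return a dict of model updates, and the manager persists whatever comes back. A minimal sketch of that round trip (FakeDriver and the in-memory volume table below are stand-ins):

# Minimal sketch of the driver -> manager model_update round trip.
class FakeDriver(object):
    def create_export(self, context, volume):
        # e.g. the Solaris/HP SAN drivers return provider_location here
        return {'provider_location': '10.0.0.5:3260,1 iqn.example:%s'
                                     % volume['name']}

volumes = {7: {'id': 7, 'name': 'volume-00000007'}}

def volume_update(volume_id, model_update):
    volumes[volume_id].update(model_update)

model_update = FakeDriver().create_export(None, volumes[7])
if model_update:                       # mirrors the new checks in the manager
    volume_update(7, model_update)

assert 'provider_location' in volumes[7]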
diff --git a/nova/volume/san.py b/nova/volume/san.py
index 26d6125e7..9532c8116 100644
--- a/nova/volume/san.py
+++ b/nova/volume/san.py
@@ -16,13 +16,16 @@
# under the License.
"""
Drivers for san-stored volumes.
+
The unique thing about a SAN is that we don't expect that we can run the volume
- controller on the SAN hardware. We expect to access it over SSH or some API.
+controller on the SAN hardware. We expect to access it over SSH or some API.
"""
import os
import paramiko
+from xml.etree import ElementTree
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -41,37 +44,19 @@ flags.DEFINE_string('san_password', '',
'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
'Filename of private key to use for SSH authentication')
+flags.DEFINE_string('san_clustername', '',
+ 'Cluster name to use for creating volumes')
+flags.DEFINE_integer('san_ssh_port', 22,
+ 'SSH port to use with SAN')
class SanISCSIDriver(ISCSIDriver):
""" Base class for SAN-style storage volumes
- (storage providers we access over SSH)"""
- #Override because SAN ip != host ip
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
- volume_name = volume['name']
-
- # TODO(justinsb): store in volume, remerge with generic iSCSI code
- host = FLAGS.san_ip
-
- (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
-
- location = None
- find_iscsi_name = self._build_iscsi_target_name(volume)
- for target in out.splitlines():
- if find_iscsi_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- if not location:
- raise exception.Error(_("Could not find iSCSI export "
- " for volume %s") %
- volume_name)
-
- iscsi_portal = location.split(",")[0]
- LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
- (iscsi_name, iscsi_portal))
- return (iscsi_name, iscsi_portal)
+
+    A SAN-style storage volume is 'different' because the volume controller
+    probably won't run on it, so we need to access it over SSH or another
+ remote protocol.
+ """
def _build_iscsi_target_name(self, volume):
return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
@@ -85,6 +70,7 @@ class SanISCSIDriver(ISCSIDriver):
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if FLAGS.san_password:
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
password=FLAGS.san_password)
elif FLAGS.san_privatekey:
@@ -92,10 +78,11 @@ class SanISCSIDriver(ISCSIDriver):
# It sucks that paramiko doesn't support DSA keys
privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
pkey=privatekey)
else:
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
return ssh
def _run_ssh(self, command, check_exit_code=True):
@@ -124,10 +111,10 @@ class SanISCSIDriver(ISCSIDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
if not (FLAGS.san_password or FLAGS.san_privatekey):
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
if not (FLAGS.san_ip):
- raise exception.Error("san_ip must be set")
+ raise exception.Error(_("san_ip must be set"))
def _collect_lines(data):
@@ -155,17 +142,27 @@ def _get_prefixed_values(data, prefix):
class SolarisISCSIDriver(SanISCSIDriver):
"""Executes commands relating to Solaris-hosted ISCSI volumes.
+
Basic setup for a Solaris iSCSI server:
+
pkg install storage-server SUNWiscsit
+
svcadm enable stmf
+
svcadm enable -r svc:/network/iscsi/target:default
+
pfexec itadm create-tpg e1000g0 ${MYIP}
+
pfexec itadm create-target -t e1000g0
+
Then grant the user that will be logging on lots of permissions.
I'm not sure exactly which though:
+
zfs allow justinsb create,mount,destroy rpool
+
usermod -P'File System Management' justinsb
+
usermod -P'Primary Administrator' justinsb
Also make sure you can login using san_login & san_password/san_privatekey
@@ -306,6 +303,17 @@ class SolarisISCSIDriver(SanISCSIDriver):
self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
(target_group_name, luid))
+ #TODO(justinsb): Is this always 1? Does it matter?
+ iscsi_portal_interface = '1'
+ iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+ db_update = {}
+ db_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_name))
+
+ return db_update
+
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@@ -333,3 +341,245 @@ class SolarisISCSIDriver(SanISCSIDriver):
if self._is_lu_created(volume):
self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
(luid))
+
+
+class HpSanISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
+
+ We use the CLIQ interface, over SSH.
+
+ Rough overview of CLIQ commands used:
+
+ :createVolume: (creates the volume)
+
+ :getVolumeInfo: (to discover the IQN etc)
+
+ :getClusterInfo: (to discover the iSCSI target IP address)
+
+ :assignVolumeChap: (exports it with CHAP security)
+
+ The 'trick' here is that the HP SAN enforces security by default, so a
+ volume mount would normally require both configuring the SAN in the
+ volume layer and performing the mount in the compute layer. Multi-layer
+ operations are not catered for at the moment in the nova architecture,
+ so instead we share the volume using CHAP at volume creation time. The
+ mount then only needs those CHAP credentials, so it can take place
+ exclusively in the compute layer.
+ """
+
+ def _cliq_run(self, verb, cliq_args):
+ """Runs a CLIQ command over SSH, without doing any result parsing"""
+ cliq_arg_strings = []
+ for k, v in cliq_args.items():
+ cliq_arg_strings.append(" %s=%s" % (k, v))
+ cmd = verb + ''.join(cliq_arg_strings)
+
+ return self._run_ssh(cmd)
+
+ def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
+ """Runs a CLIQ command over SSH, parsing and checking the output"""
+ cliq_args['output'] = 'XML'
+ (out, _err) = self._cliq_run(verb, cliq_args)
+
+ LOG.debug(_("CLIQ command returned %s"), out)
+
+ result_xml = ElementTree.fromstring(out)
+ if check_cliq_result:
+ response_node = result_xml.find("response")
+ if response_node is None:
+ msg = (_("Malformed response to CLIQ command "
+ "%(verb)s %(cliq_args)s. Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ result_code = response_node.attrib.get("result")
+
+ if result_code != "0":
+ msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
+ " Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ return result_xml
+
+ def _cliq_get_cluster_info(self, cluster_name):
+ """Queries for info about the cluster (including IP)"""
+ cliq_args = {}
+ cliq_args['clusterName'] = cluster_name
+ cliq_args['searchDepth'] = '1'
+ cliq_args['verbose'] = '0'
+
+ result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
+
+ return result_xml
+
+ def _cliq_get_cluster_vip(self, cluster_name):
+ """Gets the IP on which a cluster shares iSCSI volumes"""
+ cluster_xml = self._cliq_get_cluster_info(cluster_name)
+
+ vips = []
+ for vip in cluster_xml.findall("response/cluster/vip"):
+ vips.append(vip.attrib.get('ipAddress'))
+
+ if len(vips) == 1:
+ return vips[0]
+
+ _xml = ElementTree.tostring(cluster_xml)
+ msg = (_("Unexpected number of virtual ips for cluster "
+ " %(cluster_name)s. Result=%(_xml)s") %
+ locals())
+ raise exception.Error(msg)
+
+ def _cliq_get_volume_info(self, volume_name):
+ """Gets the volume info, including IQN"""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume_name
+ result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
+
+ # Result looks like this:
+ #<gauche version="1.0">
+ # <response description="Operation succeeded." name="CliqSuccess"
+ # processingTime="87" result="0">
+ # <volume autogrowPages="4" availability="online" blockSize="1024"
+ # bytesWritten="0" checkSum="false" clusterName="Cluster01"
+ # created="2011-02-08T19:56:53Z" deleting="false" description=""
+ # groupName="Group01" initialQuota="536870912" isPrimary="true"
+ # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
+ # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
+ # minReplication="1" name="vol-b" parity="0" replication="2"
+ # reserveQuota="536870912" scratchQuota="4194304"
+ # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
+ # size="1073741824" stridePages="32" thinProvision="true">
+ # <status description="OK" value="2"/>
+ # <permission access="rw"
+ # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
+ # chapName="chapusername" chapRequired="true" id="25369"
+ # initiatorSecret="" iqn="" iscsiEnabled="true"
+ # loadBalance="true" targetSecret="supersecret"/>
+ # </volume>
+ # </response>
+ #</gauche>
+
+ # Flatten the nodes into a dictionary; use prefixes to avoid collisions
+ volume_attributes = {}
+
+ volume_node = result_xml.find("response/volume")
+ for k, v in volume_node.attrib.items():
+ volume_attributes["volume." + k] = v
+
+ status_node = volume_node.find("status")
+ if status_node is not None:
+ for k, v in status_node.attrib.items():
+ volume_attributes["status." + k] = v
+
+ # We only consider the first permission node
+ permission_node = volume_node.find("permission")
+ if permission_node is not None:
+ for k, v in permission_node.attrib.items():
+ volume_attributes["permission." + k] = v
+
+ LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
+ locals())
+ return volume_attributes
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ cliq_args = {}
+ cliq_args['clusterName'] = FLAGS.san_clustername
+ #TODO(justinsb): Should we default to inheriting thinProvision?
+ cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
+ cliq_args['volumeName'] = volume['name']
+ if int(volume['size']) == 0:
+ cliq_args['size'] = '100MB'
+ else:
+ cliq_args['size'] = '%sGB' % volume['size']
+
+ self._cliq_run_xml("createVolume", cliq_args)
+
+ volume_info = self._cliq_get_volume_info(volume['name'])
+ cluster_name = volume_info['volume.clusterName']
+ iscsi_iqn = volume_info['volume.iscsiIqn']
+
+ #TODO(justinsb): Is this always 1? Does it matter?
+ cluster_interface = '1'
+
+ cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+ iscsi_portal = cluster_vip + ":3260," + cluster_interface
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_iqn))
+
+ return model_update
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['prompt'] = 'false' # Don't confirm
+
+ self._cliq_run_xml("deleteVolume", cliq_args)
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ return self._do_export(context, volume, force_create=False)
+
+ def create_export(self, context, volume):
+ return self._do_export(context, volume, force_create=True)
+
+ def _do_export(self, context, volume, force_create):
+ """Supports ensure_export and create_export"""
+ volume_info = self._cliq_get_volume_info(volume['name'])
+
+ is_shared = 'permission.authGroup' in volume_info
+
+ model_update = {}
+
+ should_export = False
+
+ if force_create or not is_shared:
+ should_export = True
+ # Check that we have a project_id
+ project_id = volume['project_id']
+ if not project_id:
+ project_id = context.project_id
+
+ if project_id:
+ #TODO(justinsb): Use a real per-project password here
+ chap_username = 'proj_' + project_id
+ # HP/Lefthand requires that the password be >= 12 characters
+ chap_password = 'project_secret_' + project_id
+ else:
+ msg = (_("Could not determine project for volume %s, "
+ "can't export") %
+ (volume['name']))
+ if force_create:
+ raise exception.Error(msg)
+ else:
+ LOG.warn(msg)
+ should_export = False
+
+ if should_export:
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['chapName'] = chap_username
+ cliq_args['targetSecret'] = chap_password
+
+ self._cliq_run_xml("assignVolumeChap", cliq_args)
+
+ model_update['provider_auth'] = ("CHAP %s %s" %
+ (chap_username, chap_password))
+
+ return model_update
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+
+ self._cliq_run_xml("unassignVolume", cliq_args)
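
For readers following the new HpSanISCSIDriver, the snippet below is a minimal, standalone sketch of the attribute-flattening approach used by _cliq_get_volume_info and of the "portal iqn" provider_location string returned by create_volume. The sample XML is a trimmed version of the response quoted in the driver's inline comments, and the portal address is a made-up placeholder; this is an illustration, not part of the patch.

    # Standalone sketch: flatten a (hypothetical) CLIQ getVolumeInfo response
    # into a prefixed dictionary, then build the provider_location string.
    from xml.etree import ElementTree

    SAMPLE = """
    <gauche version="1.0">
      <response description="Operation succeeded." name="CliqSuccess" result="0">
        <volume clusterName="Cluster01" name="vol-b"
                iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b">
          <status description="OK" value="2"/>
          <permission access="rw" chapName="chapusername" chapRequired="true"/>
        </volume>
      </response>
    </gauche>
    """

    result_xml = ElementTree.fromstring(SAMPLE)
    volume_node = result_xml.find("response/volume")

    # Flatten attributes into one dict, prefixing keys to avoid collisions.
    attrs = dict(("volume." + k, v) for k, v in volume_node.attrib.items())
    for child in ("status", "permission"):
        node = volume_node.find(child)
        if node is not None:
            attrs.update((child + "." + k, v) for k, v in node.attrib.items())

    # provider_location is "<portal> <iqn>", as returned by create_volume().
    iscsi_portal = "192.0.2.10:3260,1"   # hypothetical cluster VIP + interface
    print("%s %s" % (iscsi_portal, attrs["volume.iscsiIqn"]))
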
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 8cb439259..aa12d432a 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -21,17 +21,14 @@
# XenAPI plugin for managing glance images
#
-import base64
-import errno
-import hmac
import httplib
import os
import os.path
import pickle
-import sha
+import shlex
+import shutil
import subprocess
-import time
-import urlparse
+import tempfile
import XenAPIPlugin
@@ -41,30 +38,6 @@ configure_logging('glance')
CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
-FILE_SR_PATH = '/var/run/sr-mount'
-
-
-def remove_kernel_ramdisk(session, args):
- """Removes kernel and/or ramdisk from dom0's file system"""
- kernel_file = exists(args, 'kernel-file')
- ramdisk_file = exists(args, 'ramdisk-file')
- if kernel_file:
- os.remove(kernel_file)
- if ramdisk_file:
- os.remove(ramdisk_file)
- return "ok"
-
-
-def copy_kernel_vdi(session, args):
- vdi = exists(args, 'vdi-ref')
- size = exists(args, 'image-size')
- #Use the uuid as a filename
- vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
- copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
- filename = with_vdi_in_dom0(session, vdi, False,
- lambda dev:
- _copy_kernel_vdi('/dev/%s' % dev, copy_args))
- return filename
def _copy_kernel_vdi(dest, copy_args):
@@ -73,6 +46,10 @@ def _copy_kernel_vdi(dest, copy_args):
logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
dest, vdi_uuid)
filename = KERNEL_DIR + '/' + vdi_uuid
+ #make sure KERNEL_DIR exists, otherwise create it
+ if not os.path.isdir(KERNEL_DIR):
+ logging.debug("Creating directory %s", KERNEL_DIR)
+ os.makedirs(KERNEL_DIR)
#read data from /dev/ and write into a file on /boot/guest
of = open(filename, 'wb')
f = open(dest, 'rb')
@@ -85,93 +62,309 @@ def _copy_kernel_vdi(dest, copy_args):
return filename
-def put_vdis(session, args):
+def _download_tarball(sr_path, staging_path, image_id, glance_host,
+ glance_port):
+ """Download the tarball image from Glance and extract it into the staging
+ area.
+ """
+ conn = httplib.HTTPConnection(glance_host, glance_port)
+ conn.request('GET', '/images/%s' % image_id)
+ resp = conn.getresponse()
+ if resp.status == httplib.NOT_FOUND:
+ raise Exception("Image '%s' not found in Glance" % image_id)
+ elif resp.status != httplib.OK:
+ raise Exception("Unexpected response from Glance %i" % resp.status)
+
+ tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
+ tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
+
+ chunk = resp.read(CHUNK_SIZE)
+ while chunk:
+ tar_proc.stdin.write(chunk)
+ chunk = resp.read(CHUNK_SIZE)
+
+ _finish_subprocess(tar_proc, tar_cmd)
+ conn.close()
+
+
+def _fixup_vhds(sr_path, staging_path, uuid_stack):
+ """Fixup the downloaded VHDs before we move them into the SR.
+
+ We cannot extract VHDs directly into the SR since they don't yet have
+ UUIDs, aren't properly associated with each other, and would be subject to
+ a race-condition of one-file being present and the other not being
+ downloaded yet.
+
+ To avoid these problems, we use a staging area to fix up the VHDs before
+ moving them into the SR. The steps involved are:
+
+ 1. Extracting tarball into staging area
+
+ 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
+
+ 3. Linking the two VHDs together
+
+ 4. Pseudo-atomically moving the images into the SR. (It's not really
+ atomic because it takes place as two os.rename operations; however,
+ the chances of an SR.scan occurring between the two rename()
+ invocations are so small that we can safely ignore it)
+ """
+ def rename_with_uuid(orig_path):
+ """Rename VHD using UUID so that it will be recognized by SR on a
+ subsequent scan.
+
+ Since Python2.4 doesn't have the `uuid` module, we pass a stack of
+ pre-computed UUIDs from the compute worker.
+ """
+ orig_dirname = os.path.dirname(orig_path)
+ uuid = uuid_stack.pop()
+ new_path = os.path.join(orig_dirname, "%s.vhd" % uuid)
+ os.rename(orig_path, new_path)
+ return new_path, uuid
+
+ def link_vhds(child_path, parent_path):
+ """Use vhd-util to associate the snapshot VHD with its base_copy.
+
+ This needs to be done before we move both VHDs into the SR to prevent
+ the base_copy from being DOA (deleted-on-arrival).
+ """
+ modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
+ % locals())
+ modify_proc = _make_subprocess(modify_cmd, stderr=True)
+ _finish_subprocess(modify_proc, modify_cmd)
+
+ def move_into_sr(orig_path):
+ """Move a file into the SR"""
+ filename = os.path.basename(orig_path)
+ new_path = os.path.join(sr_path, filename)
+ os.rename(orig_path, new_path)
+ return new_path
+
+ def assert_vhd_not_hidden(path):
+ """
+ This is a sanity check on the image; if a snap.vhd isn't
+ present, then the image.vhd better not be marked 'hidden' or it will
+ be deleted when moved into the SR.
+ """
+ query_cmd = "vhd-util query -n %(path)s -f" % locals()
+ query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
+ out, err = _finish_subprocess(query_proc, query_cmd)
+
+ for line in out.splitlines():
+ if line.startswith('hidden'):
+ value = line.split(':')[1].strip()
+ if value == "1":
+ raise Exception(
+ "VHD %(path)s is marked as hidden without child" %
+ locals())
+
+ orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
+ if not os.path.exists(orig_base_copy_path):
+ raise Exception("Invalid image: image.vhd not present")
+
+ base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
+
+ vdi_uuid = base_copy_uuid
+ orig_snap_path = os.path.join(staging_path, 'snap.vhd')
+ if os.path.exists(orig_snap_path):
+ snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
+ vdi_uuid = snap_uuid
+ # NOTE(sirp): this step is necessary so that an SR scan won't
+ # delete the base_copy out from under us (since it would be
+ # orphaned)
+ link_vhds(snap_path, base_copy_path)
+ move_into_sr(snap_path)
+ else:
+ assert_vhd_not_hidden(base_copy_path)
+
+ move_into_sr(base_copy_path)
+ return vdi_uuid
+
+
+def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
+ """Hard-link VHDs into the staging area with the appropriate filenames
+ ('snap.vhd' or 'image.vhd')
+ """
+ for name, uuid in vdi_uuids.items():
+ source = os.path.join(sr_path, "%s.vhd" % uuid)
+ link_name = os.path.join(staging_path, "%s.vhd" % name)
+ os.link(source, link_name)
+
+
+def _upload_tarball(staging_path, image_id, glance_host, glance_port):
+ """
+ Create a tarball of the image and then stream that into Glance
+ using chunked-transfer-encoded HTTP.
+ """
+ conn = httplib.HTTPConnection(glance_host, glance_port)
+ # NOTE(sirp): httplib under python2.4 won't accept a file-like object
+ # to request
+ conn.putrequest('PUT', '/images/%s' % image_id)
+
+ # TODO(sirp): make `store` configurable
+ headers = {
+ 'content-type': 'application/octet-stream',
+ 'transfer-encoding': 'chunked',
+ 'x-image-meta-is_public': 'True',
+ 'x-image-meta-status': 'queued',
+ 'x-image-meta-type': 'vhd'}
+ for header, value in headers.iteritems():
+ conn.putheader(header, value)
+ conn.endheaders()
+
+ tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals()
+ tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True)
+
+ chunk = tar_proc.stdout.read(CHUNK_SIZE)
+ while chunk:
+ conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
+ chunk = tar_proc.stdout.read(CHUNK_SIZE)
+ conn.send("0\r\n\r\n")
+
+ _finish_subprocess(tar_proc, tar_cmd)
+
+ resp = conn.getresponse()
+ if resp.status != httplib.OK:
+ raise Exception("Unexpected response from Glance %i" % resp.status)
+ conn.close()
+
+
+def _make_staging_area(sr_path):
+ """
+ The staging area is a place where we can temporarily store and
+ manipulate VHDs. The use of the staging area is different for upload and
+ download:
+
+ Download
+ ========
+
+ When we download the tarball, the VHDs contained within will have names
+ like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before
+ moving them into the SR. However, since 'image.vhd' may be a base_copy, we
+ need to link it to 'snap.vhd' (using vhd-util modify) before moving both
+ into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted).
+ The staging area gives us a place to perform these operations before they
+ are moved to the SR, scanned, and then registered with XenServer.
+
+ Upload
+ ======
+
+ On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd'
+ in the case of the snapshot VHD, and 'image.vhd' in the case of the
+ base_copy. The staging area provides a directory in which we can create
+ hard-links to rename the VHDs without affecting what's in the SR.
+
+
+ NOTE
+ ====
+
+ The staging area is created as a subdirectory within the SR in order to
+ guarantee that it resides within the same filesystem and therefore permit
+ hard-linking and cheap file moves.
+ """
+ staging_path = tempfile.mkdtemp(dir=sr_path)
+ return staging_path
+
+
+def _cleanup_staging_area(staging_path):
+ """Remove staging area directory
+
+ On upload, the staging area contains hard-links to the VHDs in the SR;
+ it's safe to remove the staging-area because the SR will keep the link
+ count > 0 (so the VHDs in the SR will not be deleted).
+ """
+ shutil.rmtree(staging_path)
+
+
+def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False):
+ """Make a subprocess according to the given command-line string
+ """
+ kwargs = {}
+ kwargs['stdout'] = stdout and subprocess.PIPE or None
+ kwargs['stderr'] = stderr and subprocess.PIPE or None
+ kwargs['stdin'] = stdin and subprocess.PIPE or None
+ args = shlex.split(cmdline)
+ proc = subprocess.Popen(args, **kwargs)
+ return proc
+
+
+def _finish_subprocess(proc, cmdline):
+ """Ensure that the process returned a zero exit code indicating success
+ """
+ out, err = proc.communicate()
+ ret = proc.returncode
+ if ret != 0:
+ raise Exception("'%(cmdline)s' returned non-zero exit code: "
+ "retcode=%(ret)i, stderr='%(err)s'" % locals())
+ return out, err
+
+
+def download_vhd(session, args):
+ """Download an image from Glance, unbundle it, and then deposit the VHDs
+ into the storage repository
+ """
params = pickle.loads(exists(args, 'params'))
- vdi_uuids = params["vdi_uuids"]
image_id = params["image_id"]
glance_host = params["glance_host"]
glance_port = params["glance_port"]
+ uuid_stack = params["uuid_stack"]
+ sr_path = params["sr_path"]
- sr_path = get_sr_path(session)
- #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
- tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
- tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
- paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
- tar_cmd.extend(paths)
- logging.debug("Bundling image with cmd: %s", tar_cmd)
- subprocess.call(tar_cmd)
- logging.debug("Writing to test file %s", tmp_file)
- put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port)
- # FIXME(sirp): return anything useful here?
- return ""
-
-
-def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
- size = os.path.getsize(tmp_file)
- basename = os.path.basename(tmp_file)
-
- bundle = open(tmp_file, 'r')
+ staging_path = _make_staging_area(sr_path)
try:
- headers = {
- 'x-image-meta-store': 'file',
- 'x-image-meta-is_public': 'True',
- 'x-image-meta-type': 'raw',
- 'x-image-meta-size': size,
- 'content-length': size,
- 'content-type': 'application/octet-stream',
- }
- conn = httplib.HTTPConnection(glance_host, glance_port)
- #NOTE(sirp): httplib under python2.4 won't accept a file-like object
- # to request
- conn.putrequest('PUT', '/images/%s' % image_id)
-
- for header, value in headers.iteritems():
- conn.putheader(header, value)
- conn.endheaders()
-
- chunk = bundle.read(CHUNK_SIZE)
- while chunk:
- conn.send(chunk)
- chunk = bundle.read(CHUNK_SIZE)
-
- res = conn.getresponse()
- #FIXME(sirp): should this be 201 Created?
- if res.status != httplib.OK:
- raise Exception("Unexpected response from Glance %i" % res.status)
+ _download_tarball(sr_path, staging_path, image_id, glance_host,
+ glance_port)
+ vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack)
+ return vdi_uuid
finally:
- bundle.close()
+ _cleanup_staging_area(staging_path)
+
+
+def upload_vhd(session, args):
+ """Bundle the VHDs comprising an image and then stream them into Glance.
+ """
+ params = pickle.loads(exists(args, 'params'))
+ vdi_uuids = params["vdi_uuids"]
+ image_id = params["image_id"]
+ glance_host = params["glance_host"]
+ glance_port = params["glance_port"]
+ sr_path = params["sr_path"]
+ staging_path = _make_staging_area(sr_path)
+ try:
+ _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
+ _upload_tarball(staging_path, image_id, glance_host, glance_port)
+ finally:
+ _cleanup_staging_area(staging_path)
-def get_sr_path(session):
- sr_ref = find_sr(session)
+ return "" # Nothing useful to return on an upload
- if sr_ref is None:
- raise Exception('Cannot find SR to read VDI from')
- sr_rec = session.xenapi.SR.get_record(sr_ref)
- sr_uuid = sr_rec["uuid"]
- sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
- return sr_path
+def copy_kernel_vdi(session, args):
+ vdi = exists(args, 'vdi-ref')
+ size = exists(args, 'image-size')
+ #Use the uuid as a filename
+ vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
+ copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
+ filename = with_vdi_in_dom0(session, vdi, False,
+ lambda dev:
+ _copy_kernel_vdi('/dev/%s' % dev, copy_args))
+ return filename
-#TODO(sirp): both objectstore and glance need this, should this be refactored
-#into common lib
-def find_sr(session):
- host = get_this_host(session)
- srs = session.xenapi.SR.get_all()
- for sr in srs:
- sr_rec = session.xenapi.SR.get_record(sr)
- if not ('i18n-key' in sr_rec['other_config'] and
- sr_rec['other_config']['i18n-key'] == 'local-storage'):
- continue
- for pbd in sr_rec['PBDs']:
- pbd_rec = session.xenapi.PBD.get_record(pbd)
- if pbd_rec['host'] == host:
- return sr
- return None
+def remove_kernel_ramdisk(session, args):
+ """Removes kernel and/or ramdisk from dom0's file system"""
+ kernel_file = exists(args, 'kernel-file')
+ ramdisk_file = exists(args, 'ramdisk-file')
+ if kernel_file:
+ os.remove(kernel_file)
+ if ramdisk_file:
+ os.remove(ramdisk_file)
+ return "ok"
if __name__ == '__main__':
- XenAPIPlugin.dispatch({'put_vdis': put_vdis,
+ XenAPIPlugin.dispatch({'upload_vhd': upload_vhd,
+ 'download_vhd': download_vhd,
'copy_kernel_vdi': copy_kernel_vdi,
'remove_kernel_ramdisk': remove_kernel_ramdisk})
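
As a side note on the new _upload_tarball: because httplib under Python 2.4 will not accept a file-like request body, the plugin writes the chunked transfer-encoding frames by hand. The sketch below reproduces just that framing against an in-memory stream so it can be run without a Glance endpoint; the 8192-byte chunk size mirrors CHUNK_SIZE, everything else is illustrative.

    # Standalone sketch of the chunked transfer-encoding framing used by
    # _upload_tarball, driven by an in-memory stand-in for the tar stream.
    import io

    CHUNK_SIZE = 8192

    def frame_chunks(fileobj):
        """Yield HTTP/1.1 chunked transfer-encoding frames for fileobj's data."""
        chunk = fileobj.read(CHUNK_SIZE)
        while chunk:
            # Each frame is "<hex length>\r\n<data>\r\n", as sent by conn.send().
            yield ("%x\r\n" % len(chunk)).encode("ascii") + chunk + b"\r\n"
            chunk = fileobj.read(CHUNK_SIZE)
        # A zero-length chunk terminates the chunked body.
        yield b"0\r\n\r\n"

    data = io.BytesIO(b"x" * 20000)  # stand-in for the tar stream
    frames = list(frame_chunks(data))
    print("%d frames, last frame %r" % (len(frames), frames[-1]))
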
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
new file mode 100644
index 000000000..4aa89863a
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+XenAPI Plugin for transferring data between host nodes
+"""
+
+import os
+import os.path
+import pickle
+import shlex
+import shutil
+import subprocess
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging('migration')
+
+
+def move_vhds_into_sr(session, args):
+ """Moves the VHDs from their copied location to the SR"""
+ params = pickle.loads(exists(args, 'params'))
+ instance_id = params['instance_id']
+
+ old_base_copy_uuid = params['old_base_copy_uuid']
+ old_cow_uuid = params['old_cow_uuid']
+
+ new_base_copy_uuid = params['new_base_copy_uuid']
+ new_cow_uuid = params['new_cow_uuid']
+
+ sr_path = params['sr_path']
+ sr_temp_path = "%s/images/" % sr_path
+
+ # Discover the copied VHDs locally, and then set up paths to copy
+ # them to under the SR
+ source_image_path = "%s/instance%d" % ('/images/', instance_id)
+ source_base_copy_path = "%s/%s.vhd" % (source_image_path,
+ old_base_copy_uuid)
+ source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
+
+ temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id)
+ new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
+ new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+
+ logging.debug('Creating temporary SR path %s' % temp_vhd_path)
+ os.makedirs(temp_vhd_path)
+
+ logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path))
+ shutil.move(source_base_copy_path, new_base_copy_path)
+
+ logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path))
+ shutil.move(source_cow_path, new_cow_path)
+
+ logging.debug('Cleaning up %s' % source_image_path)
+ os.rmdir(source_image_path)
+
+ # Link the COW to the base copy
+ logging.debug('Attaching COW to the base copy %s -> %s' %
+ (new_cow_path, new_base_copy_path))
+ subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
+ (new_cow_path, new_base_copy_path)))
+ logging.debug('Moving VHDs into SR %s' % sr_path)
+ shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path)
+ shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path)
+
+ logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
+ os.rmdir(temp_vhd_path)
+ return ""
+
+
+def transfer_vhd(session, args):
+ """Rsyncs a VHD to an adjacent host"""
+ params = pickle.loads(exists(args, 'params'))
+ instance_id = params['instance_id']
+ host = params['host']
+ vdi_uuid = params['vdi_uuid']
+ sr_path = params['sr_path']
+ vhd_path = "%s.vhd" % vdi_uuid
+
+ source_path = "%s/%s" % (sr_path, vhd_path)
+ dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id)
+
+ logging.debug("Preparing to transmit %s to %s" % (source_path,
+ dest_path))
+
+ ssh_cmd = 'ssh -o StrictHostKeyChecking=no'
+
+ rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
+ % (ssh_cmd, source_path, dest_path))
+
+ logging.debug('rsync %s' % (' '.join(rsync_args)))
+
+ rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE)
+ logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0])
+ logging.debug('Rsync return: %d' % rsync_proc.returncode)
+ if rsync_proc.returncode != 0:
+ raise Exception("Unexpected VHD transfer failure")
+ return ""
+
+
+if __name__ == '__main__':
+ XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd,
+ 'move_vhds_into_sr': move_vhds_into_sr, })
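
The new migration plugin builds its vhd-util and rsync invocations by formatting a single command string and splitting it with shlex before handing the resulting list to subprocess. A small hedged sketch of that pattern follows, with hypothetical paths; vhd-util itself is not invoked here.

    # Sketch of the command-string-to-argv pattern used by move_vhds_into_sr.
    import shlex

    # Hypothetical paths for the moved COW and base-copy VHDs.
    new_cow_path = "/var/run/sr-mount/SR_UUID/images/instance1/cow.vhd"
    new_base_copy_path = "/var/run/sr-mount/SR_UUID/images/instance1/base.vhd"

    cmd = ('/usr/sbin/vhd-util modify -n %s -p %s'
           % (new_cow_path, new_base_copy_path))
    argv = shlex.split(cmd)
    print(argv)
    # -> ['/usr/sbin/vhd-util', 'modify', '-n', <cow path>, '-p', <base path>]
    # The plugin hands a list like this to subprocess.call(); actually running
    # it requires vhd-util and real VHDs, so the call is not made here.
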
diff --git a/run_tests.py b/run_tests.py
index 6d96454b9..3c8d410e1 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -17,27 +17,245 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""Unittest runner for Nova.
+
+To run all tests
+ python run_tests.py
+
+To run a single test:
+ python run_tests.py test_compute:ComputeTestCase.test_run_terminate
+
+To run a single test module:
+ python run_tests.py test_compute
+
+ or
+
+ python run_tests.py api.test_wsgi
+
+"""
+
import gettext
import os
import unittest
import sys
from nose import config
-from nose import result
from nose import core
+from nose import result
from nova import log as logging
from nova.tests import fake_flags
+class _AnsiColorizer(object):
+ """
+ A colorizer is an object that loosely wraps around a stream, allowing
+ callers to write text to the stream in a particular color.
+
+ Colorizer classes must implement C{supported()} and C{write(text, color)}.
+ """
+ _colors = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, magenta=35, cyan=36, white=37)
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ """
+ A class method that returns True if the current platform supports
+ coloring terminal output using this method. Returns False otherwise.
+ """
+ if not stream.isatty():
+ return False # auto color only on TTYs
+ try:
+ import curses
+ except ImportError:
+ return False
+ else:
+ try:
+ try:
+ return curses.tigetnum("colors") > 2
+ except curses.error:
+ curses.setupterm()
+ return curses.tigetnum("colors") > 2
+ except:
+ raise
+ # guess false in case of error
+ return False
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ """
+ Write the given text to the stream in the given color.
+
+ @param text: Text to be written to the stream.
+
+ @param color: A string label for a color. e.g. 'red', 'white'.
+ """
+ color = self._colors[color]
+ self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ from win32console import GetStdHandle, STD_OUT_HANDLE, \
+ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
+ FOREGROUND_INTENSITY
+ red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
+ FOREGROUND_BLUE, FOREGROUND_INTENSITY)
+ self.stream = stream
+ self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
+ self._colors = {
+ 'normal': red | green | blue,
+ 'red': red | bold,
+ 'green': green | bold,
+ 'blue': blue | bold,
+ 'yellow': red | green | bold,
+ 'magenta': red | blue | bold,
+ 'cyan': green | blue | bold,
+ 'white': red | green | blue | bold
+ }
+
+ def supported(cls, stream=sys.stdout):
+ try:
+ import win32console
+ screenBuffer = win32console.GetStdHandle(
+ win32console.STD_OUT_HANDLE)
+ except ImportError:
+ return False
+ import pywintypes
+ try:
+ screenBuffer.SetConsoleTextAttribute(
+ win32console.FOREGROUND_RED |
+ win32console.FOREGROUND_GREEN |
+ win32console.FOREGROUND_BLUE)
+ except pywintypes.error:
+ return False
+ else:
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ color = self._colors[color]
+ self.screenBuffer.SetConsoleTextAttribute(color)
+ self.stream.write(text)
+ self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ self.stream.write(text)
+
+
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
result.TextTestResult.__init__(self, *args, **kw)
self._last_case = None
+ self.colorizer = None
+ # NOTE(vish): reset stdout for the terminal check
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+ if colorizer.supported():
+ self.colorizer = colorizer(self.stream)
+ break
+ sys.stdout = stdout
def getDescription(self, test):
return str(test)
+ # NOTE(vish): copied from unittest with edit to add color
+ def addSuccess(self, test):
+ unittest.TestResult.addSuccess(self, test)
+ if self.showAll:
+ self.colorizer.write("OK", 'green')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ # NOTE(vish): copied from unittest with edit to add color
+ def addFailure(self, test, err):
+ unittest.TestResult.addFailure(self, test, err)
+ if self.showAll:
+ self.colorizer.write("FAIL", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ # NOTE(vish): copied from nose with edit to add color
+ def addError(self, test, err):
+ """Overrides normal addError to add support for
+ errorClasses. If the exception is a registered class, the
+ error will be added to the list for that class, not errors.
+ """
+ stream = getattr(self, 'stream', None)
+ ec, ev, tb = err
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3 compat
+ exc_info = self._exc_info_to_string(err)
+ for cls, (storage, label, isfail) in self.errorClasses.items():
+ if result.isclass(ec) and issubclass(ec, cls):
+ if isfail:
+ test.passed = False
+ storage.append((test, exc_info))
+ # Might get patched into a streamless result
+ if stream is not None:
+ if self.showAll:
+ message = [label]
+ detail = result._exception_detail(err[1])
+ if detail:
+ message.append(detail)
+ stream.writeln(": ".join(message))
+ elif self.dots:
+ stream.write(label[:1])
+ return
+ self.errors.append((test, exc_info))
+ test.passed = False
+ if stream is not None:
+ if self.showAll:
+ self.colorizer.write("ERROR", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ stream.write('E')
+
def startTest(self, test):
unittest.TestResult.startTest(self, test)
current_case = test.test.__class__.__name__
@@ -62,12 +280,23 @@ class NovaTestRunner(core.TextTestRunner):
if __name__ == '__main__':
logging.setup()
+ # If any argument looks like a test name but doesn't have "nova.tests" in
+ # front of it, automatically add that so we don't have to type as much
+ argv = []
+ for x in sys.argv:
+ if x.startswith('test_'):
+ argv.append('nova.tests.%s' % x)
+ else:
+ argv.append(x)
+
+ testdir = os.path.abspath(os.path.join("nova", "tests"))
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
+ workingDir=testdir,
plugins=core.DefaultPluginManager())
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
- sys.exit(not core.run(config=c, testRunner=runner))
+ sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
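
The colorizer classes added above share one contract: supported() probes the platform and write(text, color) emits the text, with NovaTestResult falling back to plain output when colour is unavailable. The standalone sketch below shows the same ANSI escape format used by _AnsiColorizer.write() together with a Null-style fallback; the colour table is a subset of _colors and the helper name is made up for illustration.

    # Minimal sketch of ANSI-coloured test output with a plain-text fallback.
    import sys

    COLORS = {'red': 31, 'green': 32, 'yellow': 33}

    def write_colored(stream, text, color):
        if stream.isatty():
            # Same escape format as _AnsiColorizer.write().
            stream.write('\x1b[%s;1m%s\x1b[0m\n' % (COLORS[color], text))
        else:
            # Null behaviour: plain text when colour is unsupported.
            stream.write(text + '\n')

    write_colored(sys.stdout, 'OK', 'green')
    write_colored(sys.stdout, 'FAIL', 'red')
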
diff --git a/run_tests.sh b/run_tests.sh
index 70212cc6a..8f4d37cd4 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -39,8 +39,18 @@ done
function run_tests {
# Just run the test suites in current environment
- ${wrapper} rm -f nova.sqlite
- ${wrapper} $NOSETESTS
+ ${wrapper} $NOSETESTS 2> run_tests.log
+ # If we get some short import error right away, print the error log directly
+ RESULT=$?
+ if [ "$RESULT" -ne "0" ];
+ then
+ ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
+ if [ "$ERRSIZE" -lt "40" ];
+ then
+ cat run_tests.log
+ fi
+ fi
+ return $RESULT
}
NOSETESTS="python run_tests.py $noseargs"
@@ -73,7 +83,9 @@ fi
if [ -z "$noseargs" ];
then
- run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
+ srcfiles=`find bin -type f ! -name "nova.conf*"`
+ srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
+ run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1
else
run_tests
fi
diff --git a/smoketests/base.py b/smoketests/base.py
index 610270c5c..204b4a1eb 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -17,19 +17,21 @@
# under the License.
import boto
-import boto_v6
import commands
import httplib
import os
import paramiko
-import random
import sys
+import time
import unittest
from boto.ec2.regioninfo import RegionInfo
from smoketests import flags
+SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
+boto_v6 = None
class SmokeTestCase(unittest.TestCase):
@@ -39,12 +41,10 @@ class SmokeTestCase(unittest.TestCase):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(ip, username='root', pkey=key)
- stdin, stdout, stderr = client.exec_command('uptime')
- print 'uptime: ', stdout.read()
return client
- def can_ping(self, ip):
- """ Attempt to ping the specified IP, and give up after 1 second. """
+ def can_ping(self, ip, command="ping"):
+ """Attempt to ping the specified IP, and give up after 1 second."""
# NOTE(devcamcar): ping timeout flag is different in OSX.
if sys.platform == 'darwin':
@@ -52,10 +52,41 @@ class SmokeTestCase(unittest.TestCase):
else:
timeout_flag = 'w'
- status, output = commands.getstatusoutput('ping -c1 -%s1 %s' %
- (timeout_flag, ip))
+ status, output = commands.getstatusoutput('%s -c1 -%s1 %s' %
+ (command, timeout_flag, ip))
return status == 0
+ def wait_for_running(self, instance, tries=60, wait=1):
+ """Wait for instance to be running"""
+ for x in xrange(tries):
+ instance.update()
+ if instance.state.startswith('running'):
+ return True
+ time.sleep(wait)
+ else:
+ return False
+
+ def wait_for_ping(self, ip, command="ping", tries=120):
+ """Wait for ip to be pingable"""
+ for x in xrange(tries):
+ if self.can_ping(ip, command):
+ return True
+ else:
+ return False
+
+ def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
+ """Wait for ip to be sshable"""
+ for x in xrange(tries):
+ try:
+ conn = self.connect_ssh(ip, key_name)
+ conn.close()
+ except Exception, e:
+ time.sleep(wait)
+ else:
+ return True
+ else:
+ return False
+
def connection_for_env(self, **kwargs):
"""
Returns a boto ec2 connection for the current environment.
@@ -144,8 +175,21 @@ class SmokeTestCase(unittest.TestCase):
return True
+TEST_DATA = {}
+
+
+class UserSmokeTestCase(SmokeTestCase):
+ def setUp(self):
+ global TEST_DATA
+ self.conn = self.connection_for_env()
+ self.data = TEST_DATA
+
+
def run_tests(suites):
argv = FLAGS(sys.argv)
+ if FLAGS.use_ipv6:
+ global boto_v6
+ boto_v6 = __import__('boto_v6')
if not os.getenv('EC2_ACCESS_KEY'):
print >> sys.stderr, 'Missing EC2 environment variables. Please ' \
diff --git a/smoketests/flags.py b/smoketests/flags.py
index 35f432a77..5f3c8505e 100644
--- a/smoketests/flags.py
+++ b/smoketests/flags.py
@@ -35,5 +35,5 @@ DEFINE_bool = DEFINE_bool
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_string('region', 'nova', 'Region to use')
-DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests')
-DEFINE_string('use_ipv6', True, 'use the ipv6 or not')
+DEFINE_string('test_image', 'ami-tty', 'Image to use for launch tests')
+DEFINE_bool('use_ipv6', False, 'use the ipv6 or not')
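
The wait_for_running, wait_for_ping and wait_for_ssh helpers added to smoketests/base.py above all follow one poll-and-retry shape: try a condition up to `tries` times, sleep between attempts, and return a boolean rather than raising. A condensed sketch of that pattern, using a stand-in predicate instead of a real instance:

    # Generic form of the smoketest wait_for_* helpers; the predicate below is
    # a stand-in for "instance is running / pingable / sshable".
    import time

    def wait_for(predicate, tries=60, wait=1):
        for _ in range(tries):
            if predicate():
                return True
            time.sleep(wait)
        return False

    state = {'calls': 0}

    def ready():
        state['calls'] += 1
        return state['calls'] >= 3

    print(wait_for(ready, tries=5, wait=0))  # True after the third poll
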
diff --git a/smoketests/netadmin_smoketests.py b/smoketests/netadmin_smoketests.py
new file mode 100644
index 000000000..38beb8fdc
--- /dev/null
+++ b/smoketests/netadmin_smoketests.py
@@ -0,0 +1,194 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import commands
+import os
+import random
+import sys
+import time
+import unittest
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+from smoketests import flags
+from smoketests import base
+
+
+FLAGS = flags.FLAGS
+
+TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
+TEST_BUCKET = '%s_bucket' % TEST_PREFIX
+TEST_KEY = '%s_key' % TEST_PREFIX
+TEST_GROUP = '%s_group' % TEST_PREFIX
+
+
+class AddressTests(base.UserSmokeTestCase):
+ def test_000_setUp(self):
+ self.create_key_pair(self.conn, TEST_KEY)
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ instance_type='m1.tiny',
+ key_name=TEST_KEY)
+ self.data['instance'] = reservation.instances[0]
+ if not self.wait_for_running(self.data['instance']):
+ self.fail('instance failed to start')
+ self.data['instance'].update()
+ if not self.wait_for_ping(self.data['instance'].private_dns_name):
+ self.fail('could not ping instance')
+ if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+ TEST_KEY):
+ self.fail('could not ssh to instance')
+
+ def test_001_can_allocate_floating_ip(self):
+ result = self.conn.allocate_address()
+ self.assertTrue(hasattr(result, 'public_ip'))
+ self.data['public_ip'] = result.public_ip
+
+ def test_002_can_associate_ip_with_instance(self):
+ result = self.conn.associate_address(self.data['instance'].id,
+ self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_003_can_ssh_with_public_ip(self):
+ ssh_authorized = False
+ groups = self.conn.get_all_security_groups(['default'])
+ for rule in groups[0].rules:
+ if (rule.ip_protocol == 'tcp' and
+ rule.from_port <= 22 and rule.to_port >= 22):
+ ssh_authorized = True
+ if not ssh_authorized:
+ self.conn.authorize_security_group('default',
+ ip_protocol='tcp',
+ from_port=22,
+ to_port=22)
+ try:
+ if not self.wait_for_ssh(self.data['public_ip'], TEST_KEY):
+ self.fail('could not ssh to public ip')
+ finally:
+ if not ssh_authorized:
+ self.conn.revoke_security_group('default',
+ ip_protocol='tcp',
+ from_port=22,
+ to_port=22)
+
+ def test_004_can_disassociate_ip_from_instance(self):
+ result = self.conn.disassociate_address(self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_005_can_deallocate_floating_ip(self):
+ result = self.conn.release_address(self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_999_tearDown(self):
+ self.delete_key_pair(self.conn, TEST_KEY)
+ self.conn.terminate_instances([self.data['instance'].id])
+
+
+class SecurityGroupTests(base.UserSmokeTestCase):
+
+ def __public_instance_is_accessible(self):
+ id_url = "latest/meta-data/instance-id"
+ options = "-s --max-time 1"
+ command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
+ instance_id = commands.getoutput(command).strip()
+ if not instance_id:
+ return False
+ if instance_id != self.data['instance'].id:
+ raise Exception("Wrong instance id")
+ return True
+
+ def test_001_can_create_security_group(self):
+ self.conn.create_security_group(TEST_GROUP, description='test')
+
+ groups = self.conn.get_all_security_groups()
+ self.assertTrue(TEST_GROUP in [group.name for group in groups])
+
+ def test_002_can_launch_instance_in_security_group(self):
+ with open("proxy.sh") as f:
+ user_data = f.read()
+ self.create_key_pair(self.conn, TEST_KEY)
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ key_name=TEST_KEY,
+ security_groups=[TEST_GROUP],
+ user_data=user_data,
+ instance_type='m1.tiny')
+
+ self.data['instance'] = reservation.instances[0]
+ if not self.wait_for_running(self.data['instance']):
+ self.fail('instance failed to start')
+ self.data['instance'].update()
+ if not self.wait_for_ping(self.data['instance'].private_dns_name):
+ self.fail('could not ping instance')
+ if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+ TEST_KEY):
+ self.fail('could not ssh to instance')
+
+ def test_003_can_authorize_security_group_ingress(self):
+ self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+
+ def test_004_can_access_metadata_over_public_ip(self):
+ result = self.conn.allocate_address()
+ self.assertTrue(hasattr(result, 'public_ip'))
+ self.data['public_ip'] = result.public_ip
+
+ result = self.conn.associate_address(self.data['instance'].id,
+ self.data['public_ip'])
+ start_time = time.time()
+ try:
+ while not self.__public_instance_is_accessible():
+ # 1 minute to launch
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+ finally:
+ result = self.conn.disassociate_address(self.data['public_ip'])
+
+ def test_005_can_revoke_security_group_ingress(self):
+ self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+ start_time = time.time()
+ while self.__public_instance_is_accessible():
+ # 1 minute to teardown
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+
+ def test_999_tearDown(self):
+ self.conn.delete_key_pair(TEST_KEY)
+ self.conn.delete_security_group(TEST_GROUP)
+ groups = self.conn.get_all_security_groups()
+ self.assertFalse(TEST_GROUP in [group.name for group in groups])
+ self.conn.terminate_instances([self.data['instance'].id])
+ self.assertTrue(self.conn.release_address(self.data['public_ip']))
+
+
+if __name__ == "__main__":
+ suites = {'address': unittest.makeSuite(AddressTests),
+ 'security_group': unittest.makeSuite(SecurityGroupTests)
+ }
+ sys.exit(base.run_tests(suites))
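
SecurityGroupTests above decides whether an instance is reachable by fetching latest/meta-data/instance-id through the floating IP and retrying for up to a minute. The sketch below restates that probe with made-up IP and instance-id values; it shells out to curl via subprocess much as the test uses commands.getoutput, and is illustrative only.

    # Hedged sketch of the metadata-over-public-IP probe in SecurityGroupTests.
    import subprocess
    import time

    PUBLIC_IP = "198.51.100.7"      # hypothetical floating IP
    EXPECTED_ID = "i-00000001"      # hypothetical instance id

    def instance_is_accessible():
        """Return True if the metadata proxy answers with the expected id."""
        cmd = ["curl", "-s", "--max-time", "1",
               "http://%s/latest/meta-data/instance-id" % PUBLIC_IP]
        try:
            out = subprocess.check_output(cmd)
        except subprocess.CalledProcessError:
            return False
        return out.strip().decode("ascii", "replace") == EXPECTED_ID

    def wait_until_accessible(timeout=60):
        """Poll for up to `timeout` seconds, as test_004 does."""
        deadline = time.time() + timeout
        while not instance_is_accessible():
            if time.time() > deadline:
                raise Exception("Timeout waiting for metadata over public ip")
            time.sleep(1)

    # wait_until_accessible() would be called after associating the floating IP.
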
diff --git a/smoketests/proxy.sh b/smoketests/proxy.sh
new file mode 100755
index 000000000..9b3f3108a
--- /dev/null
+++ b/smoketests/proxy.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+# This is a simple shell script that uses netcat to set up a proxy to the
+# metadata server on port 80 and to a google ip on port 8080. This is meant
+# to be passed in by a script to an instance via user data, so that
+# automatic testing of network connectivity can be performed.
+
+# Example usage:
+# euca-run-instances -t m1.tiny -f proxy.sh ami-tty
+
+mkfifo backpipe1
+mkfifo backpipe2
+
+# NOTE(vish): proxy metadata on port 80
+while true; do
+ nc -l -p 80 0<backpipe1 | nc 169.254.169.254 80 1>backpipe1
+done &
+
+# NOTE(vish): proxy google on port 8080
+while true; do
+ nc -l -p 8080 0<backpipe2 | nc 74.125.19.99 80 1>backpipe2
+done &
diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py
index bfc2b20ba..5a4c67642 100644
--- a/smoketests/public_network_smoketests.py
+++ b/smoketests/public_network_smoketests.py
@@ -24,9 +24,16 @@ import sys
import time
import unittest
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
from smoketests import flags
from smoketests import base
-from smoketests import user_smoketests
#Note that this test should run from
#public network (outside of private network segments)
@@ -42,7 +49,7 @@ TEST_KEY2 = '%s_key2' % TEST_PREFIX
TEST_DATA = {}
-class InstanceTestsFromPublic(user_smoketests.UserSmokeTestCase):
+class InstanceTestsFromPublic(base.UserSmokeTestCase):
def test_001_can_create_keypair(self):
key = self.create_key_pair(self.conn, TEST_KEY)
self.assertEqual(key.name, TEST_KEY)
diff --git a/smoketests/user_smoketests.py b/smoketests/sysadmin_smoketests.py
index d5a3a7556..e3b84d3d3 100644
--- a/smoketests/user_smoketests.py
+++ b/smoketests/sysadmin_smoketests.py
@@ -19,7 +19,6 @@
import commands
import os
import random
-import socket
import sys
import time
import unittest
@@ -36,10 +35,8 @@ from smoketests import flags
from smoketests import base
-SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
-flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
'Local kernel file to use for bundling tests')
flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
@@ -49,17 +46,7 @@ TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_GROUP = '%s_group' % TEST_PREFIX
-TEST_DATA = {}
-
-
-class UserSmokeTestCase(base.SmokeTestCase):
- def setUp(self):
- global TEST_DATA
- self.conn = self.connection_for_env()
- self.data = TEST_DATA
-
-
-class ImageTests(UserSmokeTestCase):
+class ImageTests(base.UserSmokeTestCase):
def test_001_can_bundle_image(self):
self.assertTrue(self.bundle_image(FLAGS.bundle_image))
@@ -91,7 +78,6 @@ class ImageTests(UserSmokeTestCase):
break
time.sleep(1)
else:
- print image.state
self.assert_(False) # wasn't available within 10 seconds
self.assert_(image.type == 'machine')
@@ -133,7 +119,7 @@ class ImageTests(UserSmokeTestCase):
self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET))
-class InstanceTests(UserSmokeTestCase):
+class InstanceTests(base.UserSmokeTestCase):
def test_001_can_create_keypair(self):
key = self.create_key_pair(self.conn, TEST_KEY)
self.assertEqual(key.name, TEST_KEY)
@@ -143,109 +129,44 @@ class InstanceTests(UserSmokeTestCase):
key_name=TEST_KEY,
instance_type='m1.tiny')
self.assertEqual(len(reservation.instances), 1)
- self.data['instance_id'] = reservation.instances[0].id
+ self.data['instance'] = reservation.instances[0]
def test_003_instance_runs_within_60_seconds(self):
- reservations = self.conn.get_all_instances([self.data['instance_id']])
- instance = reservations[0].instances[0]
+ instance = self.data['instance']
# allow 60 seconds to exit pending with IP
- for x in xrange(60):
- instance.update()
- if instance.state == u'running':
- break
- time.sleep(1)
- else:
+ if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
- ip = reservations[0].instances[0].private_dns_name
+ self.data['instance'].update()
+ ip = self.data['instance'].private_dns_name
self.failIf(ip == '0.0.0.0')
- self.data['private_ip'] = ip
if FLAGS.use_ipv6:
- ipv6 = reservations[0].instances[0].dns_name_v6
+ ipv6 = self.data['instance'].dns_name_v6
self.failIf(ipv6 is None)
- self.data['ip_v6'] = ipv6
def test_004_can_ping_private_ip(self):
- for x in xrange(120):
- # ping waits for 1 second
- status, output = commands.getstatusoutput(
- 'ping -c1 %s' % self.data['private_ip'])
- if status == 0:
- break
- else:
+ if not self.wait_for_ping(self.data['instance'].private_dns_name):
self.fail('could not ping instance')
if FLAGS.use_ipv6:
- for x in xrange(120):
- # ping waits for 1 second
- status, output = commands.getstatusoutput(
- 'ping6 -c1 %s' % self.data['ip_v6'])
- if status == 0:
- break
- else:
- self.fail('could not ping instance')
+ if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"):
+ self.fail('could not ping instance v6')
def test_005_can_ssh_to_private_ip(self):
- for x in xrange(30):
- try:
- conn = self.connect_ssh(self.data['private_ip'], TEST_KEY)
- conn.close()
- except Exception:
- time.sleep(1)
- else:
- break
- else:
+ if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+ TEST_KEY):
self.fail('could not ssh to instance')
if FLAGS.use_ipv6:
- for x in xrange(30):
- try:
- conn = self.connect_ssh(
- self.data['ip_v6'], TEST_KEY)
- conn.close()
- except Exception:
- time.sleep(1)
- else:
- break
- else:
+ if not self.wait_for_ssh(self.data['instance'].ip_v6,
+ TEST_KEY):
self.fail('could not ssh to instance v6')
- def test_006_can_allocate_elastic_ip(self):
- result = self.conn.allocate_address()
- self.assertTrue(hasattr(result, 'public_ip'))
- self.data['public_ip'] = result.public_ip
-
- def test_007_can_associate_ip_with_instance(self):
- result = self.conn.associate_address(self.data['instance_id'],
- self.data['public_ip'])
- self.assertTrue(result)
-
- def test_008_can_ssh_with_public_ip(self):
- for x in xrange(30):
- try:
- conn = self.connect_ssh(self.data['public_ip'], TEST_KEY)
- conn.close()
- except socket.error:
- time.sleep(1)
- else:
- break
- else:
- self.fail('could not ssh to instance')
-
- def test_009_can_disassociate_ip_from_instance(self):
- result = self.conn.disassociate_address(self.data['public_ip'])
- self.assertTrue(result)
-
- def test_010_can_deallocate_elastic_ip(self):
- result = self.conn.release_address(self.data['public_ip'])
- self.assertTrue(result)
-
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
- if self.data.has_key('instance_id'):
- self.conn.terminate_instances([self.data['instance_id']])
+ self.conn.terminate_instances([self.data['instance'].id])
-class VolumeTests(UserSmokeTestCase):
+class VolumeTests(base.UserSmokeTestCase):
def setUp(self):
super(VolumeTests, self).setUp()
self.device = '/dev/vdb'
@@ -255,55 +176,65 @@ class VolumeTests(UserSmokeTestCase):
reservation = self.conn.run_instances(FLAGS.test_image,
instance_type='m1.tiny',
key_name=TEST_KEY)
- instance = reservation.instances[0]
- self.data['instance'] = instance
- for x in xrange(120):
- if self.can_ping(instance.private_dns_name):
- break
- else:
- self.fail('unable to start instance')
+ self.data['instance'] = reservation.instances[0]
+ if not self.wait_for_running(self.data['instance']):
+ self.fail('instance failed to start')
+ self.data['instance'].update()
+ if not self.wait_for_ping(self.data['instance'].private_dns_name):
+ self.fail('could not ping instance')
+ if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+ TEST_KEY):
+ self.fail('could not ssh to instance')
def test_001_can_create_volume(self):
volume = self.conn.create_volume(1, 'nova')
self.assertEqual(volume.size, 1)
self.data['volume'] = volume
# Give network time to find volume.
- time.sleep(5)
+ time.sleep(10)
def test_002_can_attach_volume(self):
volume = self.data['volume']
for x in xrange(10):
- if volume.status == u'available':
- break
- time.sleep(5)
volume.update()
+ if volume.status.startswith('available'):
+ break
+ time.sleep(1)
else:
self.fail('cannot attach volume with state %s' % volume.status)
volume.attach(self.data['instance'].id, self.device)
- # Volumes seems to report "available" too soon.
+ # Wait for the volume status to become in-use
for x in xrange(10):
- if volume.status == u'in-use':
- break
- time.sleep(5)
volume.update()
+ if volume.status.startswith('in-use'):
+ break
+ time.sleep(1)
+ else:
+ self.fail('volume never reached in-use status')
- self.assertEqual(volume.status, u'in-use')
+ self.assertTrue(volume.status.startswith('in-use'))
# Give instance time to recognize volume.
- time.sleep(5)
+ time.sleep(10)
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_dns_name
conn = self.connect_ssh(ip, TEST_KEY)
- commands = []
- commands.append('mkdir -p /mnt/vol')
- commands.append('mkfs.ext2 %s' % self.device)
- commands.append('mount %s /mnt/vol' % self.device)
- commands.append('echo success')
- stdin, stdout, stderr = conn.exec_command(' && '.join(commands))
+ # NOTE(vish): this will create a device node for images that don't
+ # have udev rules
+ stdin, stdout, stderr = conn.exec_command(
+ 'grep %s /proc/partitions | '
+ '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`'
+ % self.device.rpartition('/')[2])
+ exec_list = []
+ exec_list.append('mkdir -p /mnt/vol')
+ exec_list.append('/sbin/mke2fs %s' % self.device)
+ exec_list.append('mount %s /mnt/vol' % self.device)
+ exec_list.append('echo success')
+ stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list))
out = stdout.read()
conn.close()
if not out.strip().endswith('success'):
@@ -327,7 +258,7 @@ class VolumeTests(UserSmokeTestCase):
"df -h | grep %s | awk {'print $2'}" % self.device)
out = stdout.read()
conn.close()
- if not out.strip() == '1008M':
+ if not out.strip() == '1007.9M':
self.fail('Volume is not the right size: %s %s' %
(out, stderr.read()))
@@ -354,79 +285,9 @@ class VolumeTests(UserSmokeTestCase):
self.conn.delete_key_pair(TEST_KEY)
-class SecurityGroupTests(UserSmokeTestCase):
-
- def __public_instance_is_accessible(self):
- id_url = "latest/meta-data/instance-id"
- options = "-s --max-time 1"
- command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
- instance_id = commands.getoutput(command).strip()
- if not instance_id:
- return False
- if instance_id != self.data['instance_id']:
- raise Exception("Wrong instance id")
- return True
-
- def test_001_can_create_security_group(self):
- self.conn.create_security_group(TEST_GROUP, description='test')
-
- groups = self.conn.get_all_security_groups()
- self.assertTrue(TEST_GROUP in [group.name for group in groups])
-
- def test_002_can_launch_instance_in_security_group(self):
- self.create_key_pair(self.conn, TEST_KEY)
- reservation = self.conn.run_instances(FLAGS.test_image,
- key_name=TEST_KEY,
- security_groups=[TEST_GROUP],
- instance_type='m1.tiny')
-
- self.data['instance_id'] = reservation.instances[0].id
-
- def test_003_can_authorize_security_group_ingress(self):
- self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
- ip_protocol='tcp',
- from_port=80,
- to_port=80))
-
- def test_004_can_access_instance_over_public_ip(self):
- result = self.conn.allocate_address()
- self.assertTrue(hasattr(result, 'public_ip'))
- self.data['public_ip'] = result.public_ip
-
- result = self.conn.associate_address(self.data['instance_id'],
- self.data['public_ip'])
- start_time = time.time()
- while not self.__public_instance_is_accessible():
- # 1 minute to launch
- if time.time() - start_time > 60:
- raise Exception("Timeout")
- time.sleep(1)
-
- def test_005_can_revoke_security_group_ingress(self):
- self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
- ip_protocol='tcp',
- from_port=80,
- to_port=80))
- start_time = time.time()
- while self.__public_instance_is_accessible():
- # 1 minute to teardown
- if time.time() - start_time > 60:
- raise Exception("Timeout")
- time.sleep(1)
-
- def test_999_tearDown(self):
- self.conn.delete_key_pair(TEST_KEY)
- self.conn.delete_security_group(TEST_GROUP)
- groups = self.conn.get_all_security_groups()
- self.assertFalse(TEST_GROUP in [group.name for group in groups])
- self.conn.terminate_instances([self.data['instance_id']])
- self.assertTrue(self.conn.release_address(self.data['public_ip']))
-
-
if __name__ == "__main__":
suites = {'image': unittest.makeSuite(ImageTests),
'instance': unittest.makeSuite(InstanceTests),
- 'security_group': unittest.makeSuite(SecurityGroupTests),
'volume': unittest.makeSuite(VolumeTests)
}
sys.exit(base.run_tests(suites))
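
Finally, VolumeTests.test_003_can_mount_volume builds its remote command as a list of steps joined with ' && ', relying on a trailing 'echo success' to confirm that every step ran. The fragment below shows just that command assembly for the /dev/vdb device the tests use; the ssh connection and the mknod workaround are omitted.

    # Sketch of the '&&'-joined command line built in test_003_can_mount_volume.
    device = '/dev/vdb'

    exec_list = []
    exec_list.append('mkdir -p /mnt/vol')
    exec_list.append('/sbin/mke2fs %s' % device)
    exec_list.append('mount %s /mnt/vol' % device)
    exec_list.append('echo success')

    command_line = ' && '.join(exec_list)
    print(command_line)
    # The test runs this via conn.exec_command(...) and checks that the
    # captured stdout ends with 'success'.
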